entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, 1–6 items) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars)
---|---|---|---|---|---|---|---|---|---|---|---
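The preview rows below flatten these columns into pipe-separated cells. A minimal sketch of reading the full columns with the Hugging Face `datasets` library; "org/dataset-id" is a hypothetical placeholder, since this dump does not name the dataset repository:

from datasets import load_dataset

# NOTE: "org/dataset-id" is a placeholder; substitute the real hub id of this dataset.
ds = load_dataset("org/dataset-id", split="train")
row = ds[0]
print(row["module_name"], row["repo_name"], row["stars"])
print(row["python_code"][:200])  # original nn.Module source
print(row["triton_code"][:200])  # cleaned Inductor-generated Triton code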
MultiSampleDropout | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/aw/cawey5lw6xbokqu7xj3qhlyvs74fedc465d5srn5hlnzxzdj4pdh.py
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# logits => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %view_1, %view_1, %view_1, %view_1],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + (64*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + (64*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + (64*((-12) + x1))), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr0 + (x0 + (64*((-16) + x1))), tmp21 & xmask, other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tl.store(out_ptr0 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/za/czaglc3wcibxkukejfcvvacbipsq7rts66rmcrjem6wnl47cm6dy.py
# Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# logits_1 => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_10, [0]), kwargs = {})
triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (768 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 5.0
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((20, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, buf1, 1280, grid=grid(1280), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten.mean]
triton_poi_fused_mean_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MultiSampleDropout(nn.Module):
"""
# multisample dropout (wut): https://arxiv.org/abs/1905.09788
"""
def __init__(self, hidden_size, num_labels, K=5, p=0.5):
super().__init__()
self.K = K
self.dropout = nn.Dropout(p)
self.classifier = nn.Linear(hidden_size, num_labels)
def forward(self, input):
logits = torch.stack([self.classifier(self.dropout(input)) for _ in
range(self.K)], dim=0)
logits = torch.mean(logits, dim=0)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_labels': 4}]
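In the generated code above, the stack kernel loads the same addmm output five times: dropout was traced as the identity (as in eval mode), so all K samples coincide and the mean reproduces a single classifier pass. A quick check of that equivalence (an added sketch, not part of the dataset row):

import torch

m = MultiSampleDropout(hidden_size=4, num_labels=4).eval()  # dropout is identity in eval mode
x = torch.rand(4, 4, 4, 4)
# mean over K identical samples equals one plain classifier pass
assert torch.allclose(m(x), m.classifier(x), atol=1e-6)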
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 64 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + 64 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0 + 64 * (-12 + x1)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tl.full([1], 20, tl.int64)
tmp24 = tl.load(in_ptr0 + (x0 + 64 * (-16 + x1)), tmp21 & xmask, other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (768 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 5.0
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((20, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(1280)](buf0, buf1, 1280, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_mean_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0)
class MultiSampleDropoutNew(nn.Module):
"""
# multisample dropout (wut): https://arxiv.org/abs/1905.09788
"""
def __init__(self, hidden_size, num_labels, K=5, p=0.5):
super().__init__()
self.K = K
self.dropout = nn.Dropout(p)
self.classifier = nn.Linear(hidden_size, num_labels)
def forward(self, input_0):
primals_2 = self.classifier.weight
primals_3 = self.classifier.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lonePatient/TorchBlocks | MultiSampleDropout | false | 15,957 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
MaxPoolWithMask | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7l/c7lf755234fc5ppe6husm6ia7ydy4h6j3pfa5cfj6uwsuzjt4jn4.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp23 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 <= tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -10000000000000.0
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp10 = tmp9 <= tmp2
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 * tmp5
tmp13 = tmp8 + tmp12
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp16 <= tmp2
tmp18 = tmp17.to(tl.float32)
tmp19 = tmp18 * tmp5
tmp20 = tmp15 + tmp19
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp23 <= tmp2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 * tmp5
tmp27 = tmp22 + tmp26
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tl.store(out_ptr0 + (x2), tmp28, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg1_1, arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaxPoolWithMask(nn.Module):
"""
Max pooling with a mask matrix. Positions where the mask value is 0 are ignored when taking the max.
"""
def __init__(self):
super(MaxPoolWithMask, self).__init__()
self.inf = 10000000000000.0
def forward(self, tensor, mask, dim=1):
"""
:param torch.FloatTensor tensor: [batch_size, seq_len, channels] the input tensor
:param torch.LongTensor mask: [batch_size, seq_len] a 0/1 mask matrix
:param int dim: the dimension along which to apply max pooling
:return:
"""
masks = mask.view(mask.size(0), mask.size(1), -1)
masks = masks.expand(-1, -1, tensor.size(2)).float()
return torch.max(tensor + masks.le(0.5).float() * -self.inf, dim=dim)[0]
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
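A small worked example (an added sketch, not from the repo) showing that a masked position can never win the max: the mask adds -1e13 to padded slots before pooling.

import torch

pool = MaxPoolWithMask()
t = torch.tensor([[[1.0], [9.0], [3.0]]])  # [batch=1, seq_len=3, channels=1]
m = torch.tensor([[1.0, 0.0, 1.0]])        # position 1 is padding
# the 9.0 sits at the masked slot, so the pooled max is 3.0, not 9.0
assert pool(t, m).item() == 3.0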
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = 0.5
tmp3 = tmp1 <= tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -10000000000000.0
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp10 = tmp9 <= tmp2
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 * tmp5
tmp13 = tmp8 + tmp12
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp16 <= tmp2
tmp18 = tmp17.to(tl.float32)
tmp19 = tmp18 * tmp5
tmp20 = tmp15 + tmp19
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp23 <= tmp2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 * tmp5
tmp27 = tmp22 + tmp26
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(16)](arg1_1, arg0_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MaxPoolWithMaskNew(nn.Module):
"""
Max pooling with a mask matrix. Positions where the mask value is 0 are ignored when taking the max.
"""
def __init__(self):
super(MaxPoolWithMaskNew, self).__init__()
self.inf = 10000000000000.0
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lonePatient/TorchBlocks | MaxPoolWithMask | false | 15,958 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
KdMseLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/eh/ceh3zwjx3khxd7czrwvbtn6sdreo4fwpszhg66cddw2tkpyqxw4u.py
# Topologically Sorted Source Nodes: [beta_logits_S, beta_logits_T, loss], Original ATen: [aten.div, aten.mse_loss]
# Source node to ATen node mapping:
# beta_logits_S => div_1
# beta_logits_T => div
# loss => mean, pow_1, sub
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %div), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_div_mse_loss_0 = async_compile.triton('triton_per_fused_div_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [beta_logits_S, beta_logits_T, loss], Original ATen: [aten.div, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_div_mse_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class KdMseLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, logits_S, logits_T, temperature=1):
"""
Calculate the mse loss between logits_S and logits_T
:param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
"""
if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
temperature = temperature.unsqueeze(-1)
beta_logits_T = logits_T / temperature
beta_logits_S = logits_S / temperature
loss = F.mse_loss(beta_logits_S, beta_logits_T)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
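The loss reduces to mean((logits_S/T - logits_T/T)^2), i.e. plain MSE on temperature-scaled logits. A quick check against F.mse_loss directly (an added sketch, not part of the dataset row):

import torch
import torch.nn.functional as F

loss_fn = KdMseLoss()
s, t = torch.randn(2, 3), torch.randn(2, 3)
# with a scalar temperature T, the loss equals MSE on logits divided by T
assert torch.allclose(loss_fn(s, t, temperature=2), F.mse_loss(s / 2, t / 2))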
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KdMseLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lonePatient/TorchBlocks | KdMseLoss | false | 15,959 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
SKL | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5z/c5zv7j4dvexbdtir3xwd7yxehe5ztdgm4dv2lyzaw7u4zqdau2m4.py
# Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# log => log_2
# log_1 => log_3
# log_softmax => exp, log, sub_1, sum_1
# log_softmax_1 => exp_2, log_1, sub_3, sum_2
# mul => mul_2
# mul_1 => mul_3
# p => exp_1
# rp => neg
# ry => neg_1
# sub => sub_4
# sub_1 => sub_5
# sub_2 => sub_6
# sum_1 => sum_3
# truediv => mul, reciprocal
# truediv_1 => mul_1, reciprocal_1
# truediv_2 => div
# y => exp_3
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1e-08), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, 1e-08), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_2,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_3, 1e-08), kwargs = {})
# %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1.0), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-08), kwargs = {})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_3,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, %neg_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_1, %sub_6), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 64), kwargs = {})
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1 = async_compile.triton('triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp26 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-08
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp54 = 0.015625
tmp55 = tmp52 * tmp54
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp55, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_0.run(arg1_1, buf2, 256, grid=grid(256), stream=stream0)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [log_softmax, p, add, truediv, sub, add_1, log, rp, log_softmax_1, y, add_2, truediv_1, sub_1, add_3, log_1, ry, sub_2, mul, mul_1, sum_1, truediv_2], Original ATen: [aten._log_softmax, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.sub, aten.log, aten.neg, aten.sum, aten.div]
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1.run(buf5, buf0, buf2, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SKL(nn.Module):
def __init__(self, epsilon=1e-08):
super(SKL, self).__init__()
self.epsilon = epsilon
def forward(self, input, target):
logit = input.view(-1, input.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + self.epsilon) - 1 + self.epsilon).detach().log()
ry = -(1.0 / (y + self.epsilon) - 1 + self.epsilon).detach().log()
return (p * (rp - ry) * 2).sum() / bs
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
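Up to the epsilon terms, rp = -log(1/(p + eps) - 1 + eps) is log(p / (1 - p)), the logit of p, so the loss is roughly (2 / bs) * sum(p * (logit(p) - logit(y))) and vanishes when the two inputs agree. A sanity check of that fixed point (an added sketch, not part of the dataset row):

import torch

skl = SKL()
x = torch.randn(8, 5)
# identical inputs give identical softmax probabilities, so rp == ry and the loss is exactly 0
assert torch.allclose(skl(x, x), torch.tensor(0.0))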
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp52 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp26 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = 1e-08
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1, 1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tmp21 = tmp20 - tmp19
tmp22 = tmp21 + tmp15
tmp23 = tl_math.log(tmp22)
tmp24 = -tmp23
tmp27 = tl_math.exp(tmp26)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tl_math.log(tmp36)
tmp38 = tmp25 - tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 + tmp15
tmp41 = tmp17 / tmp40
tmp42 = tmp41 * tmp19
tmp43 = tmp42 - tmp19
tmp44 = tmp43 + tmp15
tmp45 = tl_math.log(tmp44)
tmp46 = -tmp45
tmp47 = tmp24 - tmp46
tmp48 = tmp14 * tmp47
tmp49 = 2.0
tmp50 = tmp48 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = _tmp52 + tmp51
_tmp52 = tl.where(rmask, tmp53, _tmp52)
tmp52 = tl.sum(_tmp52, 1)[:, None]
tmp54 = 0.015625
tmp55 = tmp52 * tmp54
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp55, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_red_fused__log_softmax_add_div_exp_log_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf5, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf2
return buf5,
class SKLNew(nn.Module):
def __init__(self, epsilon=1e-08):
super(SKLNew, self).__init__()
self.epsilon = epsilon
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lonePatient/TorchBlocks | SKL | false | 15,960 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
NetVLAD | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tb/ctbeeotfqzbneeewwh2aiay5657nsb5gfe5znphkkjrpdvh7ojsn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# x => pow_1, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16384, 128],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = (xindex // 4096)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (524288*x1)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem3wix4vgwy6v3xetkshtsypczwxeq25iw3cfygu3e4pk5e7ljs.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# Graph fragment:
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = (yindex // 128)
y0 = yindex % 128
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4096*y1)), ymask, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tl.store(out_ptr0 + (y0 + (128*x2) + (524288*y1)), tmp5, ymask)
''', device_str='cuda')
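# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A minimal sketch of what the two kernels above compute together: the
# descriptor-wise L2 normalization func.normalize(x, p=2, dim=1) from the
# source module. The helper name is ours, for documentation only; `call`
# below never invokes it.
def _ref_l2_normalize(x, eps=1e-12):
    # Kernel 0 accumulates the per-position sum of squares over the channel
    # dim; kernel 1 divides by its square root, floored at 1e-12.
    sq_norm = (x * x).sum(dim=1, keepdim=True)
    return x / sq_norm.sqrt().clamp_min(eps)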
# kernel path: runs/run_shard_0/inductor_cache/su/csutzzbh5ywbuhez7hpyubwwnxv5ln4oabvd6xv3p72wcuwk6llv.py
# Topologically Sorted Source Nodes: [conv2d, soft_assign_1], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
# conv2d => convolution
# soft_assign_1 => amax, exp, sub, sum_2
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_convolution_2 = async_compile.triton('triton_per_fused__softmax_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16384, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + (64*x0)), None)
tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp2, None)
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp10, None)
''', device_str='cuda')
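# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A sketch of the fusion above, assuming `logits` is the bias-free conv
# output reshaped to (N, K, H*W) and `bias` has shape (K,). The kernel adds
# the bias in place and emits the per-position max and sum of exponentials
# needed for a numerically stable softmax over the cluster dim. The helper
# name is ours only.
def _ref_softmax_stats(logits, bias):
    z = logits + bias.view(1, -1, 1)
    amax = z.max(dim=1, keepdim=True).values
    sum_exp = (z - amax).exp().sum(dim=1, keepdim=True)
    return z, amax, sum_exp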
# kernel path: runs/run_shard_0/inductor_cache/z6/cz67auhgj6ouvysufr2dmfgggoj46bnpzkdqhizlp7u5oaxaoier.py
# Topologically Sorted Source Nodes: [residual, residual_1, vlad], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual => sub_1
# residual_1 => mul
# vlad => sum_3
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_1), kwargs = {})
# %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32768, 4096],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32768
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 128
x2 = (xindex // 8192)
x4 = xindex % 8192
tmp1 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last')
x1 = (xindex // 128) % 64
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*r3) + (524288*x2)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr2 + (x1 + (64*r3) + (262144*x2)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r3 + (4096*x2)), rmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r3 + (4096*x2)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask, tmp12, _tmp11)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + (x5), tmp11, None)
''', device_str='cuda')
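# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A sketch of the fused residual aggregation, assuming x is the normalized
# input as (N, C, HW), `centroids` is (K, C), and z/amax/sum_exp come from
# the previous kernel. The soft assignment is rebuilt inline as
# exp(z - amax) / sum_exp instead of being materialized. Helper name is
# ours only.
def _ref_vlad_aggregate(x, centroids, z, amax, sum_exp):
    assign = (z - amax).exp() / sum_exp                              # (N, K, HW)
    residual = x.unsqueeze(1) - centroids.view(1, -1, x.size(1), 1)  # (N, K, C, HW)
    return (residual * assign.unsqueeze(2)).sum(dim=-1)              # (N, K, C)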
# kernel path: runs/run_shard_0/inductor_cache/3h/c3hg2viq3unkoxozbawckxzs34ckbvtfc4j3ubhtse6m7gwyo4qu.py
# Topologically Sorted Source Nodes: [vlad_1], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# vlad_1 => pow_3, pow_4, sum_4
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {})
triton_per_fused_linalg_vector_norm_4 = async_compile.triton('triton_per_fused_linalg_vector_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (128*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/my/cmyj2bqdix43glszeiusj3krdkr5777obr36h2zzlhv423ibpibt.py
# Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# vlad_3 => div_3, pow_5, pow_6, sum_5
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_4), kwargs = {})
triton_red_fused_div_linalg_vector_norm_5 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + (8192*x0)), tmp16, rmask & xmask)
''', device_str='cuda')
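# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A sketch of the last two kernels: per-cluster (intra) L2 normalization of
# the (N, K, C) VLAD matrix, then flattening and a global L2 normalization,
# both floored at 1e-12 like func.normalize. Helper name is ours only.
def _ref_vlad_normalize(vlad, eps=1e-12):
    vlad = vlad / vlad.norm(dim=2, keepdim=True).clamp_min(eps)
    vlad = vlad.reshape(vlad.size(0), -1)
    return vlad / vlad.norm(dim=1, keepdim=True).clamp_min(eps)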
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 128, grid=grid(16384), stream=stream0)
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_1, buf0, buf1, 512, 4096, grid=grid(512, 4096), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf3 = buf2; del buf2 # reuse
buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, soft_assign_1], Original ATen: [aten.convolution, aten._softmax]
triton_per_fused__softmax_convolution_2.run(buf3, primals_3, buf4, buf5, 16384, 64, grid=grid(16384), stream=stream0)
del primals_3
buf6 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual, residual_1, vlad], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_red_fused_mul_sub_sum_3.run(buf1, primals_4, buf3, buf4, buf5, buf6, 32768, 4096, grid=grid(32768), stream=stream0)
buf7 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf8 = reinterpret_tensor(buf7, (4, 64, 1), (64, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [vlad_1], Original ATen: [aten.linalg_vector_norm]
triton_per_fused_linalg_vector_norm_4.run(buf8, buf6, 256, 128, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1), (1, 1), 0); del buf9 # reuse
buf11 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
# Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_red_fused_div_linalg_vector_norm_5.run(buf10, buf6, buf8, buf11, 4, 8192, grid=grid(4), stream=stream0)
return (buf11, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6, buf8, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as func
import torch.nn as nn
class NetVLAD(nn.Module):
"""
NetVLAD layer implementation
Credits: https://github.com/lyakaap/NetVLAD-pytorch
"""
def __init__(self, num_clusters=64, dim=128, alpha=100.0,
normalize_input=True):
"""
Args:
num_clusters: number of clusters.
dim: dimension of descriptors.
alpha: parameter of initialization. Larger values give a harder assignment.
normalize_input: if true, descriptor-wise L2 normalization
is applied to input.
"""
super().__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = alpha
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
self._init_params()
def _init_params(self):
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids)
.unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))
def forward(self, x):
N, C = x.shape[:2]
if self.normalize_input:
x = func.normalize(x, p=2, dim=1)
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = func.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1)
residual = x_flatten.expand(self.num_clusters, -1, -1, -1).permute(
1, 0, 2, 3) - self.centroids.expand(x_flatten.size(-1), -1, -1
).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign.unsqueeze(2)
vlad = residual.sum(dim=-1)
vlad = func.normalize(vlad, p=2, dim=2)
vlad = vlad.view(x.size(0), -1)
vlad = func.normalize(vlad, p=2, dim=1)
return vlad
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
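def _example_usage():
    # Illustrative only (our helper, not used by the harness): with the
    # default hyper-parameters (64 clusters, dim 128), a batch of four
    # 64x64 descriptor maps yields one L2-normalized 8192-dim VLAD vector
    # per image.
    layer = NetVLAD()
    out = layer(torch.rand(4, 128, 64, 64))
    assert out.shape == (4, 64 * 128)
    return out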
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
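    # Note: the result of the tl.full below is discarded; it is a vestige of
    # the xmask assignment in the unsimplified kernel, where the mask is
    # constant True.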
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = xindex // 4096
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 128
y0 = yindex % 128
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tl.store(out_ptr0 + (y0 + 128 * x2 + 524288 * y1), tmp5, ymask)
@triton.jit
def triton_per_fused__softmax_convolution_2(in_out_ptr0, in_ptr0, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 64 * x0), None)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp2, None)
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp10, None)
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 128
x2 = xindex // 8192
x4 = xindex % 8192
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
x1 = xindex // 128 % 64
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r3 + 524288 * x2), rmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr2 + (x1 + 64 * r3 + 262144 * x2), rmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r3 + 4096 * x2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r3 + 4096 * x2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask, tmp12, _tmp11)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + x5, tmp11, None)
@triton.jit
def triton_per_fused_linalg_vector_norm_4(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0,
16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1)
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_div_1[grid(512, 4096)](primals_1, buf0, buf1, 512,
4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
triton_per_fused__softmax_convolution_2[grid(16384)](buf3,
primals_3, buf4, buf5, 16384, 64, XBLOCK=32, num_warps=8,
num_stages=1)
del primals_3
buf6 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
triton_red_fused_mul_sub_sum_3[grid(32768)](buf1, primals_4, buf3,
buf4, buf5, buf6, 32768, 4096, XBLOCK=8, RBLOCK=256, num_warps=
16, num_stages=1)
buf7 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf8 = reinterpret_tensor(buf7, (4, 64, 1), (64, 1, 1), 0)
del buf7
triton_per_fused_linalg_vector_norm_4[grid(256)](buf8, buf6, 256,
128, XBLOCK=8, num_warps=8, num_stages=1)
buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1), (1, 1), 0)
del buf9
buf11 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
triton_red_fused_div_linalg_vector_norm_5[grid(4)](buf10, buf6,
buf8, buf11, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
return (buf11, primals_2, primals_4, buf1, buf3, buf4, buf5, buf6, buf8,
buf10)
class NetVLADNew(nn.Module):
"""
NetVLAD layer implementation
Credits: https://github.com/lyakaap/NetVLAD-pytorch
"""
def __init__(self, num_clusters=64, dim=128, alpha=100.0,
normalize_input=True):
"""
Args:
num_clusters: number of clusters.
dim: dimension of descriptors.
alpha: parameter of initialization. Larger values give a harder assignment.
normalize_input: if true, descriptor-wise L2 normalization
is applied to input.
"""
super().__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = alpha
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=True)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
self._init_params()
def _init_params(self):
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.centroids)
.unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))
def forward(self, input_0):
primals_4 = self.centroids
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
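def _example_parity_check():
    # Illustrative only (our helper, never called): the compiled `call` is
    # CUDA-only, so this assumes a CUDA device. Given the same parameters,
    # NetVLADNew should match the eager NetVLAD module up to floating-point
    # reordering inside the fused reductions.
    layer = NetVLADNew().cuda()
    out = layer(torch.rand(4, 128, 64, 64, device='cuda'))
    assert out.shape == (4, 64 * 128)
    return out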
| liuyuzhenn/LISRD | NetVLAD | false | 15,961 | ["MIT"] | 225 | bfd890b81defebea971db0b744be617ed58f5ffa | https://github.com/liuyuzhenn/LISRD/tree/bfd890b81defebea971db0b744be617ed58f5ffa |
GaussianSmearing | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gt/cgtswrwajmw6l67smxr5catfzti522sr5dptsecpxgogakajsajo.py
# Topologically Sorted Source Nodes: [dist, pow_1, mul, exp], Original ATen: [aten.sub, aten.pow, aten.mul, aten.exp]
# Source node to ATen node mapping:
# dist => sub
# exp => exp
# mul => mul
# pow_1 => pow_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %pow_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_exp_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_exp_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 50)
x0 = xindex % 50
x2 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tmp1 * tmp5
tmp7 = tl_math.exp(tmp6)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
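# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A sketch of the single fused kernel above: broadcast the scalar `coeff`,
# subtract the RBF offsets from each distance, square, scale, and
# exponentiate. Helper name is ours only; `call` never uses it.
def _ref_gaussian_smearing(dist, offset, coeff):
    return torch.exp(coeff * (dist.unsqueeze(-1) - offset) ** 2)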
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 50), (3200, 800, 200, 50, 1), torch.float32)
# Topologically Sorted Source Nodes: [dist, pow_1, mul, exp], Original ATen: [aten.sub, aten.pow, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_mul_pow_sub_0.run(primals_3, primals_1, primals_2, buf0, 12800, grid=grid(12800), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GaussianSmearing(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50,
trainable=True):
super(GaussianSmearing, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
self.num_rbf = num_rbf
self.trainable = trainable
offset, coeff = self._initial_params()
if trainable:
self.register_parameter('coeff', nn.Parameter(coeff))
self.register_parameter('offset', nn.Parameter(offset))
else:
self.register_buffer('coeff', coeff)
self.register_buffer('offset', offset)
def _initial_params(self):
offset = torch.linspace(self.cutoff_lower, self.cutoff_upper, self.
num_rbf)
coeff = -0.5 / (offset[1] - offset[0]) ** 2
return offset, coeff
def reset_parameters(self):
offset, coeff = self._initial_params()
self.offset.data.copy_(offset)
self.coeff.data.copy_(coeff)
def forward(self, dist):
dist = dist.unsqueeze(-1) - self.offset
return torch.exp(self.coeff * torch.pow(dist, 2))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
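def _example_usage():
    # Illustrative only (our helper, not used by the harness): with the
    # default 50 RBFs on [0, 5], every scalar distance in the (4, 4, 4, 4)
    # input expands into a 50-dim radial-basis feature.
    layer = GaussianSmearing()
    out = layer(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4, 50)
    return out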
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 50
x0 = xindex % 50
x2 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tmp1 * tmp5
tmp7 = tl_math.exp(tmp6)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 50), (3200, 800, 200, 50, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_pow_sub_0[grid(12800)](primals_3,
primals_1, primals_2, buf0, 12800, XBLOCK=128, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, buf0
class GaussianSmearingNew(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50,
trainable=True):
super(GaussianSmearingNew, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
self.num_rbf = num_rbf
self.trainable = trainable
offset, coeff = self._initial_params()
if trainable:
self.register_parameter('coeff', nn.Parameter(coeff))
self.register_parameter('offset', nn.Parameter(offset))
else:
self.register_buffer('coeff', coeff)
self.register_buffer('offset', offset)
def _initial_params(self):
offset = torch.linspace(self.cutoff_lower, self.cutoff_upper, self.
num_rbf)
coeff = -0.5 / (offset[1] - offset[0]) ** 2
return offset, coeff
def reset_parameters(self):
offset, coeff = self._initial_params()
self.offset.data.copy_(offset)
self.coeff.data.copy_(coeff)
def forward(self, input_0):
primals_3 = self.coeff
primals_2 = self.offset
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lsnty5190/torchmd-net | GaussianSmearing | false | 15,962 | ["MIT"] | 51 | 0bedf43801f0c7d38900d8e1db778fe69f3a4d01 | https://github.com/lsnty5190/torchmd-net/tree/0bedf43801f0c7d38900d8e1db778fe69f3a4d01 |
GatedConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mg/cmgchvsrxolpftbtxzv5huur3ggva2z3x33a3ocfgaarb6opxcfp.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [3], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 7) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gd/cgd72kiulohldp6hcvr4adusxq5ed64akgmfg26p5xk5ta6eldyr.py
# Topologically Sorted Source Nodes: [mask_1, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mask_1 => sigmoid
# mul => mul
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x1) + (56*x2)), xmask)
tmp2 = tl.load(in_ptr0 + (28 + x0 + (7*x1) + (56*x2)), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp2 * tmp1
tl.store(out_ptr0 + (x3), tmp1, xmask)
tl.store(out_ptr1 + (x3), tmp3, xmask)
''', device_str='cuda')
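# --- Illustrative eager-mode reference (not part of the generated graph) ---
# A sketch of the fusion above: the padded conv output of shape (4, 8, 7)
# is sliced back to the input length, split channel-wise into a mask half
# and a value half, and gated with a sigmoid. Helper name is ours only.
def _ref_gate(conv_out, seq_len):
    mask, value = conv_out[:, :, :seq_len].chunk(2, dim=1)
    return value * torch.sigmoid(mask)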
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 7), (56, 7, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 224, grid=grid(224), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask_1, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
return (buf3, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4, 4), (56, 7, 1), 28), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaskedConv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
groups=1, bias=True, causal=True):
if causal:
padding = (kernel_size - 1) * dilation
else:
padding = (kernel_size - 1) * dilation // 2
super(MaskedConv1d, self).__init__(in_channels, out_channels,
kernel_size, stride=1, padding=padding, dilation=dilation,
groups=groups, bias=bias)
def forward(self, inputs):
output = super(MaskedConv1d, self).forward(inputs)
return output[:, :, :inputs.size(2)]
class GatedConv1d(MaskedConv1d):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
groups=1, bias=True, causal=True):
super(GatedConv1d, self).__init__(in_channels, 2 * out_channels,
kernel_size, dilation, groups, bias, causal)
self.sigmoid = nn.Sigmoid()
def forward(self, inputs):
output = super(GatedConv1d, self).forward(inputs)
mask, output = output.chunk(2, 1)
mask = self.sigmoid(mask)
return output * mask
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 7 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x1 + 56 * x2), xmask)
tmp2 = tl.load(in_ptr0 + (28 + x0 + 7 * x1 + 56 * x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp2 * tmp1
tl.store(out_ptr0 + x3, tmp1, xmask)
tl.store(out_ptr1 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 7), (56, 7, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(224)](buf1, primals_2, 224,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf3, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4, 4),
(56, 7, 1), 28), buf2
class MaskedConv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
groups=1, bias=True, causal=True):
if causal:
padding = (kernel_size - 1) * dilation
else:
padding = (kernel_size - 1) * dilation // 2
super(MaskedConv1d, self).__init__(in_channels, out_channels,
kernel_size, stride=1, padding=padding, dilation=dilation,
groups=groups, bias=bias)
def forward(self, inputs):
output = super(MaskedConv1d, self).forward(inputs)
return output[:, :, :inputs.size(2)]
class GatedConv1dNew(MaskedConv1d):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
groups=1, bias=True, causal=True):
super(GatedConv1dNew, self).__init__(in_channels, 2 * out_channels,
kernel_size, dilation, groups, bias, causal)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lonePatient/TorchBlocks | GatedConv1d | false | 15,963 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
ExpNormalSmearing | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2b/c2bm5664wi7zvbke7gwfeiakuapvcq7cpije3apjmoe2ggjtvbju.py
# Topologically Sorted Source Nodes: [mul, truediv, cos, add, cutoffs, lt, float_1, cutoffs_1, neg, neg_1, add_1, mul_3, exp, sub, pow_1, mul_4, exp_1, mul_5], Original ATen: [aten.mul, aten.div, aten.cos, aten.add, aten.lt, aten._to_copy, aten.neg, aten.exp, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# cos => cos
# cutoffs => mul_1
# cutoffs_1 => mul_2
# exp => exp
# exp_1 => exp_1
# float_1 => convert_element_type
# lt => lt
# mul => mul
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# neg => neg
# neg_1 => neg_1
# pow_1 => pow_1
# sub => sub
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, 3.141592653589793), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 5.0), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cos, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%unsqueeze, 5.0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %convert_element_type), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%unsqueeze,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, %primals_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_4,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %exp_1), kwargs = {})
triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 50)
x0 = xindex % 50
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 3.141592653589793
tmp2 = tmp0 * tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl_math.cos(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 5.0
tmp11 = tmp0 < tmp10
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp9 * tmp12
tmp15 = -tmp14
tmp16 = -tmp0
tmp17 = 0.0
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp6
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp15 * tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp13 * tmp25
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (50, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 50), (3200, 800, 200, 50, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, cos, add, cutoffs, lt, float_1, cutoffs_1, neg, neg_1, add_1, mul_3, exp, sub, pow_1, mul_4, exp_1, mul_5], Original ATen: [aten.mul, aten.div, aten.cos, aten.add, aten.lt, aten._to_copy, aten.neg, aten.exp, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0.run(primals_1, primals_2, primals_3, buf0, 12800, grid=grid(12800), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
class CosineCutoff(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0):
super(CosineCutoff, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
def forward(self, distances):
if self.cutoff_lower > 0:
cutoffs = 0.5 * (torch.cos(math.pi * (2 * (distances - self.
cutoff_lower) / (self.cutoff_upper - self.cutoff_lower) +
1.0)) + 1.0)
cutoffs = cutoffs * (distances < self.cutoff_upper).float()
cutoffs = cutoffs * (distances > self.cutoff_lower).float()
return cutoffs
else:
cutoffs = 0.5 * (torch.cos(distances * math.pi / self.
cutoff_upper) + 1.0)
cutoffs = cutoffs * (distances < self.cutoff_upper).float()
return cutoffs
class ExpNormalSmearing(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50,
trainable=True):
super(ExpNormalSmearing, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
self.num_rbf = num_rbf
self.trainable = trainable
self.cutoff_fn = CosineCutoff(0, cutoff_upper)
self.alpha = 5.0 / (cutoff_upper - cutoff_lower)
means, betas = self._initial_params()
if trainable:
self.register_parameter('means', nn.Parameter(means))
self.register_parameter('betas', nn.Parameter(betas))
else:
self.register_buffer('means', means)
self.register_buffer('betas', betas)
def _initial_params(self):
start_value = torch.exp(torch.scalar_tensor(-self.cutoff_upper +
self.cutoff_lower))
means = torch.linspace(start_value, 1, self.num_rbf)
betas = torch.tensor([(2 / self.num_rbf * (1 - start_value)) ** -2] *
self.num_rbf)
return means, betas
def reset_parameters(self):
means, betas = self._initial_params()
self.means.data.copy_(means)
self.betas.data.copy_(betas)
def forward(self, dist):
dist = dist.unsqueeze(-1)
return self.cutoff_fn(dist) * torch.exp(-self.betas * (torch.exp(
self.alpha * (-dist + self.cutoff_lower)) - self.means) ** 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
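# Usage sketch (added for illustration, not part of the upstream module): the
# smearing expands a tensor of pairwise distances into `num_rbf` exp-normal
# radial basis features that are damped to zero at `cutoff_upper` by the
# cosine cutoff. Shapes follow get_inputs(); the seed and values are arbitrary.
def example_exp_normal_smearing():
    torch.manual_seed(0)
    smearing = ExpNormalSmearing(cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50)
    dist = torch.rand(4, 4, 4, 4) * 5.0
    feats = smearing(dist)
    assert feats.shape == (*dist.shape, 50)
    # Past the upper cutoff the cosine window is exactly zero, so every
    # feature vanishes.
    far = smearing(torch.full((1,), 6.0))
    assert torch.all(far == 0)
    return feats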
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 50
x0 = xindex % 50
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp1 = 3.141592653589793
tmp2 = tmp0 * tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp5 = tl_math.cos(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 5.0
tmp11 = tmp0 < tmp10
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp9 * tmp12
tmp15 = -tmp14
tmp16 = -tmp0
tmp17 = 0.0
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp6
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp15 * tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp13 * tmp25
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (50,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 50), (3200, 800, 200, 50, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_add_cos_div_exp_lt_mul_neg_pow_sub_0[grid
(12800)](primals_1, primals_2, primals_3, buf0, 12800, XBLOCK=
256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3
class CosineCutoff(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0):
super(CosineCutoff, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
def forward(self, distances):
if self.cutoff_lower > 0:
cutoffs = 0.5 * (torch.cos(math.pi * (2 * (distances - self.
cutoff_lower) / (self.cutoff_upper - self.cutoff_lower) +
1.0)) + 1.0)
cutoffs = cutoffs * (distances < self.cutoff_upper).float()
cutoffs = cutoffs * (distances > self.cutoff_lower).float()
return cutoffs
else:
cutoffs = 0.5 * (torch.cos(distances * math.pi / self.
cutoff_upper) + 1.0)
cutoffs = cutoffs * (distances < self.cutoff_upper).float()
return cutoffs
class ExpNormalSmearingNew(nn.Module):
def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50,
trainable=True):
super(ExpNormalSmearingNew, self).__init__()
self.cutoff_lower = cutoff_lower
self.cutoff_upper = cutoff_upper
self.num_rbf = num_rbf
self.trainable = trainable
self.cutoff_fn = CosineCutoff(0, cutoff_upper)
self.alpha = 5.0 / (cutoff_upper - cutoff_lower)
means, betas = self._initial_params()
if trainable:
self.register_parameter('means', nn.Parameter(means))
self.register_parameter('betas', nn.Parameter(betas))
else:
self.register_buffer('means', means)
self.register_buffer('betas', betas)
def _initial_params(self):
start_value = torch.exp(torch.scalar_tensor(-self.cutoff_upper +
self.cutoff_lower))
means = torch.linspace(start_value, 1, self.num_rbf)
betas = torch.tensor([(2 / self.num_rbf * (1 - start_value)) ** -2] *
self.num_rbf)
return means, betas
def reset_parameters(self):
means, betas = self._initial_params()
self.means.data.copy_(means)
self.betas.data.copy_(betas)
def forward(self, input_0):
primals_2 = self.means
primals_3 = self.betas
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
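# Parity sketch (assumes a CUDA device, since `call` launches the fused Triton
# kernel): the compiled ExpNormalSmearingNew should match the eager formula
# recomputed here from the module's own parameters, up to float32 rounding.
# `check_triton_matches_eager` is a hypothetical helper, not upstream code.
def check_triton_matches_eager():
    torch.manual_seed(0)
    mod = ExpNormalSmearingNew().cuda()
    dist = torch.rand(4, 4, 4, 4, device='cuda')
    out = mod(dist)
    d = dist.unsqueeze(-1)
    cutoff = 0.5 * (torch.cos(d * math.pi / mod.cutoff_upper) + 1.0)
    cutoff = cutoff * (d < mod.cutoff_upper).float()
    ref = cutoff * torch.exp(-mod.betas * (torch.exp(mod.alpha * -d) - mod.means) ** 2)
    assert torch.allclose(out, ref, atol=1e-5)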
| lsnty5190/torchmd-net | ExpNormalSmearing | false | 15,964 | [
"MIT"
] | 51 | 0bedf43801f0c7d38900d8e1db778fe69f3a4d01 | https://github.com/lsnty5190/torchmd-net/tree/0bedf43801f0c7d38900d8e1db778fe69f3a4d01 |
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/py/cpyk5b6mlotalxultdmmebirncntxpef6b6uohknjluaz76jqen3.py
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# distance_negative => sum_2
# distance_positive => sum_1
# losses => relu
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 4), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 4.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class TripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
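# Usage sketch (illustrative values only): the loss realizes
# mean(relu(||a - p||^2 - ||a - n||^2 + margin)), with the squared distances
# summed over dim 1. `example_triplet_loss` is a hypothetical helper.
def example_triplet_loss():
    torch.manual_seed(0)
    criterion = TripletLoss(margin=4)
    anchor = torch.rand(4, 4, 4, 4)
    positive = anchor + 0.01 * torch.randn_like(anchor)
    negative = torch.rand(4, 4, 4, 4)
    loss = criterion(anchor, positive, negative)
    assert loss.ndim == 0 and loss.item() >= 0.0
    return loss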
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 4.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
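# Parity sketch (assumes a CUDA device; note the margin of 4 is baked into the
# fused kernel as the constant tmp35): the Triton-backed TripletLossNew should
# agree with the eager formula on identical inputs. Hypothetical helper.
def check_triplet_kernel():
    torch.manual_seed(0)
    a, p, n = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    fused = TripletLossNew(margin=4)(a, p, n)
    ref = torch.relu((a - p).pow(2).sum(1) - (a - n).pow(2).sum(1) + 4).mean()
    assert torch.allclose(fused, ref, atol=1e-5)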
| lxy5513/cvToolkit | TripletLoss | false | 15,965 | [
"MIT"
] | 47 | 51586c8016b47f5e7852032f9f3211c89d80f537 | https://github.com/lxy5513/cvToolkit/tree/51586c8016b47f5e7852032f9f3211c89d80f537 |
AxialPositionalEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hf/chfg2arscesam5vn7l6d6c76iujfzvzhfrjjfuw4h7prdbjytpn2.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# x_1 => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = (xindex // 4) % 16
x0 = xindex % 4
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 4), (16, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
del primals_3
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, shape, emb_dim_index=1):
super().__init__()
total_dimensions = len(shape) + 2
ax_dim_indexes = [i for i in range(1, total_dimensions) if i !=
emb_dim_index]
self.num_axials = len(shape)
for i, (axial_dim, axial_dim_index) in enumerate(zip(shape,
ax_dim_indexes)):
shape = [1] * total_dimensions
shape[emb_dim_index] = dim
shape[axial_dim_index] = axial_dim
parameter = nn.Parameter(torch.randn(*shape))
setattr(self, f'param_{i}', parameter)
def forward(self, x):
for i in range(self.num_axials):
x = x + getattr(self, f'param_{i}')
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'shape': [4, 4]}]
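# Usage sketch: with shape=[4, 4] and emb_dim_index=1, param_0 has shape
# (1, dim, 4, 1) and param_1 has shape (1, dim, 1, 4), so the forward adds one
# learned embedding per spatial axis by broadcasting. Hypothetical helper with
# illustrative values.
def example_axial_embedding():
    torch.manual_seed(0)
    emb = AxialPositionalEmbedding(dim=4, shape=[4, 4])
    x = torch.rand(4, 4, 4, 4)
    y = emb(x)
    assert y.shape == x.shape
    # The offset is the broadcast sum of the two axial parameters.
    assert torch.allclose(y - x, (emb.param_0 + emb.param_1).expand_as(x), atol=1e-6)
    return y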
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = xindex // 4 % 16
x0 = xindex % 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 4), (16, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, primals_3,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
return buf0,
class AxialPositionalEmbeddingNew(nn.Module):
def __init__(self, dim, shape, emb_dim_index=1):
super().__init__()
total_dimensions = len(shape) + 2
ax_dim_indexes = [i for i in range(1, total_dimensions) if i !=
emb_dim_index]
self.num_axials = len(shape)
for i, (axial_dim, axial_dim_index) in enumerate(zip(shape,
ax_dim_indexes)):
shape = [1] * total_dimensions
shape[emb_dim_index] = dim
shape[axial_dim_index] = axial_dim
parameter = nn.Parameter(torch.randn(*shape))
setattr(self, f'param_{i}', parameter)
def forward(self, input_0):
primals_1 = self.param_0
primals_3 = self.param_1
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
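# Parity sketch (assumes a CUDA device): the fused add kernel performs
# x + param_0 + param_1 in a single pass, so it should match the eager
# broadcast sum for these shapes. Hypothetical helper.
def check_axial_kernel():
    torch.manual_seed(0)
    mod = AxialPositionalEmbeddingNew(dim=4, shape=[4, 4]).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(mod(x), x + mod.param_0 + mod.param_1)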
| lucidrains/axial-attention | AxialPositionalEmbedding | false | 15,966 | [
"MIT"
] | 189 | eff2c10c2e76c735a70a6b995b571213adffbbb7 | https://github.com/lucidrains/axial-attention/tree/eff2c10c2e76c735a70a6b995b571213adffbbb7 |
AttCeMeanLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/em/cemtbxj46jzc7s3a3pexakdfg5dv3w7chdxynyt6hemfgtua425c.py
# Topologically Sorted Source Nodes: [attention_T, probs_T], Original ATen: [aten.mean, aten._softmax]
# Source node to ATen node mapping:
# attention_T => mean_1
# probs_T => amax
# Graph fragment:
# %mean_1 : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [1]), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mean_1, [-1], True), kwargs = {})
triton_poi_fused__softmax_mean_0 = async_compile.triton('triton_poi_fused__softmax_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (16 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (48 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (17 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (49 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (2 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (18 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (34 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (50 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (3 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (19 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (35 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (51 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = triton_helpers.maximum(tmp17, tmp25)
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = triton_helpers.maximum(tmp26, tmp34)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z7/cz76prezjig7qgxgvguiadouzcb27eld52u7pnldapfz7dkkbbyq.py
# Topologically Sorted Source Nodes: [attention_T, probs_T], Original ATen: [aten.mean, aten._softmax]
# Source node to ATen node mapping:
# attention_T => mean_1
# probs_T => exp, sub
# Graph fragment:
# %mean_1 : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [1]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_mean_1 = async_compile.triton('triton_poi_fused__softmax_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp9 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + (x5), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yq/cyqmbvhphd3h2m25wdbpkegnclceflojku56mtlgw532pxonenqo.py
# Topologically Sorted Source Nodes: [attention_T, le, zeros_like, probs_T, probs_T_select], Original ATen: [aten.mean, aten.le, aten.zeros_like, aten._softmax, aten.where]
# Source node to ATen node mapping:
# attention_T => mean_1
# le => le
# probs_T => div, sum_1
# probs_T_select => where
# zeros_like => full_default
# Graph fragment:
# %mean_1 : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [1]), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mean_1, -0.001), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %div), kwargs = {})
triton_poi_fused__softmax_le_mean_where_zeros_like_2 = async_compile.triton('triton_poi_fused__softmax_le_mean_where_zeros_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_le_mean_where_zeros_like_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_le_mean_where_zeros_like_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr1 + (x4), xmask)
tmp12 = tl.load(in_ptr1 + (4*x5), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + (4*x5)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x5)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x5)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = -0.001
tmp10 = tmp8 <= tmp9
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp11 / tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp10, tmp20, tmp19)
tl.store(out_ptr0 + (x4), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ls/clsydhccrbryx7iijzr734m7b6gibnkbl2hyvsz543lktldvszu7.py
# Topologically Sorted Source Nodes: [attention_S, log_softmax], Original ATen: [aten.mean, aten._log_softmax]
# Source node to ATen node mapping:
# attention_S => mean
# log_softmax => sub_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [1]), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %amax_1), kwargs = {})
triton_poi_fused__log_softmax_mean_3 = async_compile.triton('triton_poi_fused__log_softmax_mean_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp9 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lv/clvmtaauzcncejtlfmicmvxyo42zrk3gwtzdcapoiwcf7iqbykao.py
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_1, mean_2, loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg]
# Source node to ATen node mapping:
# log_softmax => exp_1, log, sub_2, sum_2
# loss => neg
# mean_2 => mean_2
# mul => mul
# sum_1 => sum_3
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sub_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_2,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_sum_4 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp30 = 16.0
tmp31 = tmp29 / tmp30
tmp32 = -tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attention_T, probs_T], Original ATen: [aten.mean, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_mean_0.run(arg1_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_T, probs_T], Original ATen: [aten.mean, aten._softmax]
triton_poi_fused__softmax_mean_1.run(arg1_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_T, le, zeros_like, probs_T, probs_T_select], Original ATen: [aten.mean, aten.le, aten.zeros_like, aten._softmax, aten.where]
triton_poi_fused__softmax_le_mean_where_zeros_like_2.run(arg1_1, buf1, buf2, 64, grid=grid(64), stream=stream0)
del arg1_1
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_S, log_softmax], Original ATen: [aten.mean, aten._log_softmax]
triton_poi_fused__softmax_mean_0.run(arg0_1, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_S, log_softmax], Original ATen: [aten.mean, aten._log_softmax]
triton_poi_fused__log_softmax_mean_3.run(arg0_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf3
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_1, mean_2, loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg]
triton_per_fused__log_softmax_mean_mul_neg_sum_4.run(buf6, buf2, buf4, 1, 16, grid=grid(1), stream=stream0)
del buf2
del buf4
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AttCeMeanLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, attention_S, attention_T, mask=None):
"""
Calculate the cross entropy between attention_S and attention_T, the dim of num_heads is averaged
:param logits_S: Tensor of shape (batch_size, num_heads, length, length) or (batch_size, length, length)
:param logits_T: Tensor of shape (batch_size, num_heads, length, length) or (batch_size, length, length)
:param mask: Tensor of shape (batch_size, length)
"""
if len(attention_S.size()) == 4:
attention_S = attention_S.mean(dim=1)
attention_T = attention_T.mean(dim=1)
probs_T = F.softmax(attention_T, dim=-1)
if mask is None:
probs_T_select = torch.where(attention_T <= -0.001, torch.
zeros_like(attention_T), probs_T)
loss = -(probs_T_select * F.log_softmax(attention_S, dim=-1)).sum(
dim=-1).mean()
else:
loss = -((probs_T * F.log_softmax(attention_S, dim=-1) * mask.
unsqueeze(1)).sum(dim=-1) * mask).sum() / mask.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
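# Usage sketch (illustrative values only): the head-averaged teacher attention
# is softmaxed into a target distribution and scored against the student's
# log-softmax along the last axis. For inputs in [0, 1) the `<= -0.001` mask
# never fires. `example_att_ce_mean_loss` is a hypothetical helper.
def example_att_ce_mean_loss():
    torch.manual_seed(0)
    criterion = AttCeMeanLoss()
    attention_S = torch.rand(4, 4, 4, 4)
    attention_T = torch.rand(4, 4, 4, 4)
    loss = criterion(attention_S, attention_T)
    assert loss.ndim == 0
    # Matching the teacher exactly minimizes the cross entropy for that teacher.
    assert criterion(attention_T, attention_T) <= loss + 1e-6
    return loss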
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (16 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (48 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x1), xmask, eviction_policy
='evict_last')
tmp10 = tl.load(in_ptr0 + (17 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (49 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (18 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (34 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (50 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (19 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (35 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (51 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = triton_helpers.maximum(tmp17, tmp25)
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = triton_helpers.maximum(tmp26, tmp34)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused__softmax_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + x5, tmp11, xmask)
@triton.jit
def triton_poi_fused__softmax_le_mean_where_zeros_like_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr1 + x4, xmask)
tmp12 = tl.load(in_ptr1 + 4 * x5, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = -0.001
tmp10 = tmp8 <= tmp9
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp11 / tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp10, tmp20, tmp19)
tl.store(out_ptr0 + x4, tmp21, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + x5, tmp10, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_4(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp30 = 16.0
tmp31 = tmp29 / tmp30
tmp32 = -tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mean_0[grid(16)](arg1_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_mean_1[grid(64)](arg1_1, buf0, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_le_mean_where_zeros_like_2[grid(64)](arg1_1,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
buf3 = buf0
del buf0
triton_poi_fused__softmax_mean_0[grid(16)](arg0_1, buf3, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused__log_softmax_mean_3[grid(64)](arg0_1, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf3
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
triton_per_fused__log_softmax_mean_mul_neg_sum_4[grid(1)](buf6,
buf2, buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf6,
class AttCeMeanLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
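# --- Hedged usage sketch (editor addition). ---
# The compiled wrapper is CUDA-only: call() asserts contiguous float32
# inputs of exactly (4, 4, 4, 4) and launches the fused kernels on cuda:0.
def example_att_ce_mean_loss_new():
    loss_fn = AttCeMeanLossNew()
    attention_S = torch.rand(4, 4, 4, 4, device='cuda')
    attention_T = torch.rand(4, 4, 4, 4, device='cuda')
    return loss_fn(attention_S, attention_T)  # scalar loss on the GPU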
 | lonePatient/TorchBlocks | AttCeMeanLoss | false | 15,967 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
LayerNormChan | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xc/cxccaea6m5natjlarhaiguykaufo3fs2d4lmmez7n342egyrrguk.py
# Topologically Sorted Source Nodes: [var, mean, sub, add, sqrt, truediv, mul, add_1], Original ATen: [aten.var, aten.mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# sqrt => sqrt
# sub => sub
# truediv => div
# var => var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x3), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var, mean, sub, add, sqrt, truediv, mul, add_1], Original ATen: [aten.var, aten.mean, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class LayerNormChan(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim=1, unbiased=False, keepdim=True)
mean = torch.mean(x, dim=1, keepdim=True)
return (x - mean) / (var + self.eps).sqrt() * self.g + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x3, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormChanNew(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, input_0):
primals_2 = self.g
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
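# --- Hedged usage sketch (editor addition). ---
# The Triton path is specialized to (4, 4, 4, 4) float32 inputs; the module
# must be moved to the GPU so its g/b parameters live where the kernel runs.
def example_layer_norm_chan_new():
    norm = LayerNormChanNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return norm(x)  # same shape as the input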
 | lucidrains/nuwa-pytorch | LayerNormChan | false | 15,968 | ["MIT"] | 310 | bf1f3dc1126ba0a24a280bd7412a8082e5013b46 | https://github.com/lucidrains/nuwa-pytorch/tree/bf1f3dc1126ba0a24a280bd7412a8082e5013b46 |
KdCeLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mv/cmvzep7nhhwtxuicw6hq3r6xogymhf57einyr726gq5z362nbzbo.py
# Topologically Sorted Source Nodes: [p_T], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_T => exp
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ig/cig5rd7e7v7tsxvukbwxsclne5itbnjqmhrzbzazhtsv4kpwehpn.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ex/cexyp56w7yrc5etz7qzfxojrhowoty3iad66srertlhj4y4ii3c7.py
# Topologically Sorted Source Nodes: [p_T, log_softmax, mul], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
# Source node to ATen node mapping:
# log_softmax => exp_1, log, sub_2, sum_2
# mul => mul
# p_T => div_2, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_mul_2 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x2), xmask)
tmp10 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4u/c4ufarqsxyafbyocfh34xewotirtzn7oudrw34iy5gcj7fuzav7e.py
# Topologically Sorted Source Nodes: [sum_1, mean, loss], Original ATen: [aten.sum, aten.mean, aten.neg]
# Source node to ATen node mapping:
# loss => neg
# mean => mean
# sum_1 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
triton_per_fused_mean_neg_sum_3 = async_compile.triton('triton_per_fused_mean_neg_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_neg_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_neg_sum_3(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tmp12 = -tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_T], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_T, log_softmax, mul], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
triton_poi_fused__log_softmax__softmax_mul_2.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_1, mean, loss], Original ATen: [aten.sum, aten.mean, aten.neg]
triton_per_fused_mean_neg_sum_3.run(buf4, buf2, 1, 64, grid=grid(1), stream=stream0)
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class KdCeLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, logits_S, logits_T, temperature=1):
"""
Calculate the cross entropy between logits_S and logits_T
:param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
:param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
"""
if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
temperature = temperature.unsqueeze(-1)
beta_logits_T = logits_T / temperature
beta_logits_S = logits_S / temperature
p_T = F.softmax(beta_logits_T, dim=-1)
loss = -(p_T * F.log_softmax(beta_logits_S, dim=-1)).sum(dim=-1).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
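# --- Hedged usage sketch (editor addition, not from the source repo). ---
# A scalar temperature softens both distributions; a (batch, length) tensor
# would be unsqueezed to broadcast over the label dimension instead.
def example_kd_ce_loss():
    loss_fn = KdCeLoss()
    logits_S = torch.rand(4, 4, 4, 4)  # student logits
    logits_T = torch.rand(4, 4, 4, 4)  # teacher logits
    return loss_fn(logits_S, logits_T, temperature=2.0)  # scalar loss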
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_per_fused_mean_neg_sum_3(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tmp12 = -tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg1_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_2[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_neg_sum_3[grid(1)](buf4, buf2, 1, 64, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf4,
class KdCeLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
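# --- Hedged usage sketch (editor addition). ---
# Note that temperature=1 was constant-folded into the compiled kernels
# (the graph multiplies and divides by 1), so only the two logits tensors
# are accepted here, and they must be (4, 4, 4, 4) float32 CUDA tensors.
def example_kd_ce_loss_new():
    logits_S = torch.rand(4, 4, 4, 4, device='cuda')
    logits_T = torch.rand(4, 4, 4, 4, device='cuda')
    return KdCeLossNew()(logits_S, logits_T)  # scalar loss on the GPU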
 | lonePatient/TorchBlocks | KdCeLoss | false | 15,969 | ["MIT"] | 82 | 4a65d746cc8a396cb7df73ed4644d97ddf843e29 | https://github.com/lonePatient/TorchBlocks/tree/4a65d746cc8a396cb7df73ed4644d97ddf843e29 |
BCNN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/g3/cg3jwchpqw7x6js5s5r4earlum6grtpazjlfueohzh4ktbenjz5j.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# x_4 => div, pow_1, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %expand), kwargs = {})
triton_per_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 0.0625
tmp2 = tmp0 * tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = tmp3 < tmp2
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp2 < tmp3
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp5 - tmp7
tmp9 = tmp8.to(tmp2.dtype)
tmp10 = tl_math.abs(tmp2)
tmp11 = 1e-08
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp19)
tmp21 = 1e-12
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = tmp14 / tmp22
tl.store(out_ptr1 + (r1 + (16*x0)), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
del arg0_1
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0.run(buf0, buf2, 4, 16, grid=grid(4), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BCNN(nn.Module):
"""Bilinear Pool
implementation of Bilinear CNN (BCNN)
https://arxiv.org/abs/1504.07889v5
Args:
thresh: small positive number for computation stability
is_vec: whether the output is a vector or not
        input_dim: the number of channels of the input feature
"""
def __init__(self, thresh=1e-08, is_vec=True, input_dim=2048):
super(BCNN, self).__init__()
self.thresh = thresh
self.is_vec = is_vec
self.output_dim = input_dim * input_dim
def _bilinearpool(self, x):
batchSize, dim, h, w = x.data.shape
x = x.reshape(batchSize, dim, h * w)
x = 1.0 / (h * w) * x.bmm(x.transpose(1, 2))
return x
def _signed_sqrt(self, x):
x = torch.mul(x.sign(), torch.sqrt(x.abs() + self.thresh))
return x
def _l2norm(self, x):
x = nn.functional.normalize(x)
return x
def forward(self, x):
x = self._bilinearpool(x)
x = self._signed_sqrt(x)
if self.is_vec:
x = x.view(x.size(0), -1)
x = self._l2norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
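# --- Hedged usage sketch (editor addition, not from the source repo). ---
# With is_vec=True a (B, C, H, W) input yields a (B, C*C) descriptor: the
# pooled Gram matrix is signed-sqrt scaled, flattened, and L2-normalized.
def example_bcnn():
    pool = BCNN(input_dim=4)
    x = torch.rand(4, 4, 4, 4)
    return pool(x)  # shape (4, 16)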
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 0.0625
tmp2 = tmp0 * tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = tmp3 < tmp2
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp2 < tmp3
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp5 - tmp7
tmp9 = tmp8.to(tmp2.dtype)
tmp10 = tl_math.abs(tmp2)
tmp11 = 1e-08
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp19)
tmp21 = 1e-12
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = tmp14 / tmp22
tl.store(out_ptr1 + (r1 + 16 * x0), tmp23, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0),
out=buf0)
del arg0_1
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0[grid(4)](buf0, buf2, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
class BCNNNew(nn.Module):
"""Bilinear Pool
implementation of Bilinear CNN (BCNN)
https://arxiv.org/abs/1504.07889v5
Args:
thresh: small positive number for computation stability
is_vec: whether the output is a vector or not
        input_dim: the number of channels of the input feature
"""
def __init__(self, thresh=1e-08, is_vec=True, input_dim=2048):
super(BCNNNew, self).__init__()
self.thresh = thresh
self.is_vec = is_vec
self.output_dim = input_dim * input_dim
def _bilinearpool(self, x):
batchSize, dim, h, w = x.data.shape
x = x.reshape(batchSize, dim, h * w)
x = 1.0 / (h * w) * x.bmm(x.transpose(1, 2))
return x
def _signed_sqrt(self, x):
x = torch.mul(x.sign(), torch.sqrt(x.abs() + self.thresh))
return x
def _l2norm(self, x):
x = nn.functional.normalize(x)
return x
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
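# --- Hedged usage sketch (editor addition). ---
# The compiled path runs the bilinear pool as an extern bmm and fuses the
# signed sqrt with the L2 normalization into a single reduction kernel.
def example_bcnn_new():
    pool = BCNNNew(input_dim=4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return pool(x)  # shape (4, 16)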
 | lvyilin/fast-MPN-COV | BCNN | false | 15,970 | ["MIT"] | 257 | d21c3fd2863c12f885faf20bd177dc066a25856c | https://github.com/lvyilin/fast-MPN-COV/tree/d21c3fd2863c12f885faf20bd177dc066a25856c |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rg/crgn6c7kb7sfbc6ij3j5r2ufm7sb3lducxeabudnnzg6pgowubma.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# output => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wlk5vyhi4xuvjdnczlkprz7lkym6iu2lbv22sggqvi5gtiufco.py
# Topologically Sorted Source Nodes: [sum_1, eq], Original ATen: [aten.sum, aten.eq]
# Source node to ATen node mapping:
# eq => eq
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%sum_1, 0), kwargs = {})
triton_per_fused_eq_sum_1 = async_compile.triton('triton_per_fused_eq_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_eq_sum_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_eq_sum_1(in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 0.0
tmp5 = tmp3 == tmp4
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.bool)
# Topologically Sorted Source Nodes: [sum_1, eq], Original ATen: [aten.sum, aten.eq]
triton_per_fused_eq_sum_1.run(arg1_1, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
return (buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-07):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, output, target):
output = torch.sigmoid(output)
if torch.sum(target) == 0:
output = 1.0 - output
target = 1.0 - target
return 1.0 - (2 * torch.sum(output * target) + self.smooth) / (
torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
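# A worked example of the loss above (a rough sketch, not from the original
# source): with N = 256 elements, strongly positive logits (sigmoid ~= 1.0)
# and an all-ones target, the intersection term is ~N, so
#     loss ~= 1.0 - (2*256 + 1.0) / (256 + 256 + 1.0 + 1e-07) ~= 0.0
# i.e. near-perfect overlap drives the Dice loss toward zero.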
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
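# A sketch of the launch below (assuming grid(256) with XBLOCK=256): a single
# program instance covers all 256 elements, applying tl.sigmoid pointwise,
# with xmask trivially true at this size.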
@triton.jit
def triton_per_fused_eq_sum_1(in_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 0.0
tmp5 = tmp3 == tmp4
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.bool)
triton_per_fused_eq_sum_1[grid(1)](arg1_1, buf2, 1, 256, num_warps=
2, num_stages=1)
del arg1_1
return buf0, buf2
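# call() returns both the sigmoid output (buf0) and the target-is-all-zeros
# flag (buf2); DiceLossNew.forward below returns only the first element, so
# the flag is computed but not consumed in this compiled wrapper.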
class DiceLossNew(nn.Module):
def __init__(self, smooth=1.0, eps=1e-07):
super(DiceLossNew, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
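# A minimal eager-mode reference (a hypothetical helper, not part of the
# original module) mirroring DiceLoss.forward above, handy for cross-checking
# the compiled path on the same inputs:
def _dice_loss_reference(output, target, smooth=1.0, eps=1e-07):
    output = torch.sigmoid(output)
    if torch.sum(target) == 0:
        output = 1.0 - output
        target = 1.0 - target
    return 1.0 - (2 * torch.sum(output * target) + smooth) / (torch.sum(
        output) + torch.sum(target) + smooth + eps)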
| lyakaap/pytorch-template | DiceLoss | false | 15,971 | ["MIT"] | 140 | eff9f0a4dd50fa49c3b949065247598d5eabc91e | https://github.com/lyakaap/pytorch-template/tree/eff9f0a4dd50fa49c3b949065247598d5eabc91e |
Truncation2D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/i6/ci6z5mzow5osw3dg4aqxcjm6cnkkx2erpzc5642i5hzvuq2fje2j.py
# Topologically Sorted Source Nodes: [iadd_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_2 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_36, %select_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp264 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp4 & tmp6
tmp8 = tmp7 & tmp2
tmp9 = tmp2 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp2 & tmp10
tmp12 = tmp3 < tmp1
tmp13 = tmp12 & tmp11
tmp14 = tmp2 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (8 + x2), tmp15 & xmask, other=0.0)
tmp17 = 0.0
tmp18 = tmp17 + tmp16
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tl.where(tmp12, tmp20, tmp17)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.where(tmp2, tmp23, tmp17)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp2 & tmp11
tmp28 = tmp12 & tmp27
tmp29 = tl.load(in_ptr0 + (8 + x2), tmp28 & xmask, other=0.0)
tmp30 = tmp17 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tl.where(tmp12, tmp32, tmp17)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp2, tmp35, tmp17)
tmp37 = tl.where(tmp12, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp11, tmp37, tmp38)
tmp40 = tl.load(in_ptr0 + (8 + x2), tmp13 & xmask, other=0.0)
tmp41 = tmp17 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp13, tmp41, tmp42)
tmp44 = tl.where(tmp12, tmp43, tmp17)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp11, tmp44, tmp45)
tmp47 = tl.where(tmp2, tmp46, tmp17)
tmp48 = tl.where(tmp2, tmp39, tmp47)
tmp49 = tl.load(in_ptr0 + (20 + x2), tmp10 & xmask, other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp10, tmp50, tmp51)
tmp53 = tmp2 & tmp9
tmp54 = tmp12 & tmp53
tmp55 = tmp2 & tmp54
tmp56 = tmp12 & tmp55
tmp57 = tl.load(in_ptr0 + (8 + x2), tmp56 & xmask, other=0.0)
tmp58 = tmp17 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp56, tmp58, tmp59)
tmp61 = tl.where(tmp12, tmp60, tmp17)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp55, tmp61, tmp62)
tmp64 = tl.where(tmp2, tmp63, tmp17)
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp54, tmp64, tmp65)
tmp67 = tmp2 & tmp53
tmp68 = tmp12 & tmp67
tmp69 = tl.load(in_ptr0 + (8 + x2), tmp68 & xmask, other=0.0)
tmp70 = tmp17 + tmp69
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp68, tmp70, tmp71)
tmp73 = tl.where(tmp12, tmp72, tmp17)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp67, tmp73, tmp74)
tmp76 = tl.where(tmp2, tmp75, tmp17)
tmp77 = tl.where(tmp12, tmp66, tmp76)
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp53, tmp77, tmp78)
tmp80 = tl.load(in_ptr0 + (8 + x2), tmp54 & xmask, other=0.0)
tmp81 = tmp17 + tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp54, tmp81, tmp82)
tmp84 = tl.where(tmp12, tmp83, tmp17)
tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype)
tmp86 = tl.where(tmp53, tmp84, tmp85)
tmp87 = tl.where(tmp2, tmp86, tmp17)
tmp88 = tl.where(tmp2, tmp79, tmp87)
tmp89 = tl.where(tmp7, tmp52, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp9, tmp89, tmp90)
tmp92 = tmp12 & tmp9
tmp93 = tmp2 & tmp92
tmp94 = tmp12 & tmp93
tmp95 = tl.load(in_ptr0 + (8 + x2), tmp94 & xmask, other=0.0)
tmp96 = tmp17 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp94, tmp96, tmp97)
tmp99 = tl.where(tmp12, tmp98, tmp17)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp93, tmp99, tmp100)
tmp102 = tl.where(tmp2, tmp101, tmp17)
tmp103 = tl.full(tmp102.shape, 0.0, tmp102.dtype)
tmp104 = tl.where(tmp92, tmp102, tmp103)
tmp105 = tl.where(tmp12, tmp104, tmp87)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tl.load(in_ptr0 + (8 + x2), tmp92 & xmask, other=0.0)
tmp109 = tmp17 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp92, tmp109, tmp110)
tmp112 = tl.where(tmp12, tmp111, tmp17)
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp9, tmp112, tmp113)
tmp115 = tl.where(tmp2, tmp114, tmp17)
tmp116 = tl.where(tmp2, tmp107, tmp115)
tmp117 = tl.where(tmp2, tmp91, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp8, tmp117, tmp118)
tmp120 = tmp2 & tmp2
tmp121 = tmp7 & tmp120
tmp122 = tmp2 & tmp121
tmp123 = tmp12 & tmp122
tmp124 = tmp2 & tmp123
tmp125 = tmp12 & tmp124
tmp126 = tl.load(in_ptr0 + (8 + x2), tmp125 & xmask, other=0.0)
tmp127 = tmp17 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp125, tmp127, tmp128)
tmp130 = tl.where(tmp12, tmp129, tmp17)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp124, tmp130, tmp131)
tmp133 = tl.where(tmp2, tmp132, tmp17)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp123, tmp133, tmp134)
tmp136 = tmp2 & tmp122
tmp137 = tmp12 & tmp136
tmp138 = tl.load(in_ptr0 + (8 + x2), tmp137 & xmask, other=0.0)
tmp139 = tmp17 + tmp138
tmp140 = tl.full(tmp139.shape, 0.0, tmp139.dtype)
tmp141 = tl.where(tmp137, tmp139, tmp140)
tmp142 = tl.where(tmp12, tmp141, tmp17)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp136, tmp142, tmp143)
tmp145 = tl.where(tmp2, tmp144, tmp17)
tmp146 = tl.where(tmp12, tmp135, tmp145)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp122, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (8 + x2), tmp123 & xmask, other=0.0)
tmp150 = tmp17 + tmp149
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp123, tmp150, tmp151)
tmp153 = tl.where(tmp12, tmp152, tmp17)
tmp154 = tl.full(tmp153.shape, 0.0, tmp153.dtype)
tmp155 = tl.where(tmp122, tmp153, tmp154)
tmp156 = tl.where(tmp2, tmp155, tmp17)
tmp157 = tl.where(tmp2, tmp148, tmp156)
tmp158 = tl.load(in_ptr0 + (20 + x2), tmp121 & xmask, other=0.0)
tmp159 = tmp157 + tmp158
tmp160 = tl.full(tmp159.shape, 0.0, tmp159.dtype)
tmp161 = tl.where(tmp121, tmp159, tmp160)
tmp162 = tmp2 & tmp120
tmp163 = tmp12 & tmp162
tmp164 = tmp2 & tmp163
tmp165 = tmp12 & tmp164
tmp166 = tl.load(in_ptr0 + (8 + x2), tmp165 & xmask, other=0.0)
tmp167 = tmp17 + tmp166
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp165, tmp167, tmp168)
tmp170 = tl.where(tmp12, tmp169, tmp17)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp164, tmp170, tmp171)
tmp173 = tl.where(tmp2, tmp172, tmp17)
tmp174 = tl.full(tmp173.shape, 0.0, tmp173.dtype)
tmp175 = tl.where(tmp163, tmp173, tmp174)
tmp176 = tmp2 & tmp162
tmp177 = tmp12 & tmp176
tmp178 = tl.load(in_ptr0 + (8 + x2), tmp177 & xmask, other=0.0)
tmp179 = tmp17 + tmp178
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp177, tmp179, tmp180)
tmp182 = tl.where(tmp12, tmp181, tmp17)
tmp183 = tl.full(tmp182.shape, 0.0, tmp182.dtype)
tmp184 = tl.where(tmp176, tmp182, tmp183)
tmp185 = tl.where(tmp2, tmp184, tmp17)
tmp186 = tl.where(tmp12, tmp175, tmp185)
tmp187 = tl.full(tmp186.shape, 0.0, tmp186.dtype)
tmp188 = tl.where(tmp162, tmp186, tmp187)
tmp189 = tl.load(in_ptr0 + (8 + x2), tmp163 & xmask, other=0.0)
tmp190 = tmp17 + tmp189
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp163, tmp190, tmp191)
tmp193 = tl.where(tmp12, tmp192, tmp17)
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp162, tmp193, tmp194)
tmp196 = tl.where(tmp2, tmp195, tmp17)
tmp197 = tl.where(tmp2, tmp188, tmp196)
tmp198 = tl.where(tmp7, tmp161, tmp197)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp120, tmp198, tmp199)
tmp201 = tmp12 & tmp120
tmp202 = tmp2 & tmp201
tmp203 = tmp12 & tmp202
tmp204 = tl.load(in_ptr0 + (8 + x2), tmp203 & xmask, other=0.0)
tmp205 = tmp17 + tmp204
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp203, tmp205, tmp206)
tmp208 = tl.where(tmp12, tmp207, tmp17)
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp202, tmp208, tmp209)
tmp211 = tl.where(tmp2, tmp210, tmp17)
tmp212 = tl.full(tmp211.shape, 0.0, tmp211.dtype)
tmp213 = tl.where(tmp201, tmp211, tmp212)
tmp214 = tl.where(tmp12, tmp213, tmp196)
tmp215 = tl.full(tmp214.shape, 0.0, tmp214.dtype)
tmp216 = tl.where(tmp120, tmp214, tmp215)
tmp217 = tl.load(in_ptr0 + (8 + x2), tmp201 & xmask, other=0.0)
tmp218 = tmp17 + tmp217
tmp219 = tl.full(tmp218.shape, 0.0, tmp218.dtype)
tmp220 = tl.where(tmp201, tmp218, tmp219)
tmp221 = tl.where(tmp12, tmp220, tmp17)
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp120, tmp221, tmp222)
tmp224 = tl.where(tmp2, tmp223, tmp17)
tmp225 = tl.where(tmp2, tmp216, tmp224)
tmp226 = tl.where(tmp2, tmp200, tmp225)
tmp227 = tl.where(tmp7, tmp119, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp2, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (20 + x2), tmp8 & xmask, other=0.0)
tmp231 = tmp116 + tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp8, tmp231, tmp232)
tmp234 = tl.where(tmp7, tmp233, tmp225)
tmp235 = tl.full(tmp234.shape, 0.0, tmp234.dtype)
tmp236 = tl.where(tmp2, tmp234, tmp235)
tmp237 = tmp12 & tmp2
tmp238 = tmp2 & tmp237
tmp239 = tmp12 & tmp238
tmp240 = tl.load(in_ptr0 + (8 + x2), tmp239 & xmask, other=0.0)
tmp241 = tmp17 + tmp240
tmp242 = tl.full(tmp241.shape, 0.0, tmp241.dtype)
tmp243 = tl.where(tmp239, tmp241, tmp242)
tmp244 = tl.where(tmp12, tmp243, tmp17)
tmp245 = tl.full(tmp244.shape, 0.0, tmp244.dtype)
tmp246 = tl.where(tmp238, tmp244, tmp245)
tmp247 = tl.where(tmp2, tmp246, tmp17)
tmp248 = tl.full(tmp247.shape, 0.0, tmp247.dtype)
tmp249 = tl.where(tmp237, tmp247, tmp248)
tmp250 = tl.where(tmp12, tmp249, tmp224)
tmp251 = tl.full(tmp250.shape, 0.0, tmp250.dtype)
tmp252 = tl.where(tmp2, tmp250, tmp251)
tmp253 = tl.load(in_ptr0 + (8 + x2), tmp237 & xmask, other=0.0)
tmp254 = tmp17 + tmp253
tmp255 = tl.full(tmp254.shape, 0.0, tmp254.dtype)
tmp256 = tl.where(tmp237, tmp254, tmp255)
tmp257 = tl.where(tmp12, tmp256, tmp17)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp2, tmp257, tmp258)
tmp260 = tl.where(tmp2, tmp259, tmp17)
tmp261 = tl.where(tmp2, tmp252, tmp260)
tmp262 = tl.where(tmp2, tmp236, tmp261)
tmp263 = tl.where(tmp2, tmp229, tmp262)
tmp265 = tmp263 + tmp264
tl.store(out_ptr0 + (x2), tmp265, xmask)
''', device_str='cuda')
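# A reading of the kernel above (an interpretation, not part of the original
# output): the nested tl.where chains replay the slice and select bounds of
# the preceding in-place adds, so each lane reconstructs the current value of
# the sliced region and then adds tmp264 (loaded at offset 32); this is the
# fused form of the `iadd_2` add named in the graph fragment.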
# kernel path: runs/run_shard_0/inductor_cache/7n/c7ndkv5xcqcdpgnxbwsw3uvfe5enml37jmdeboltxlcmhvnwppbe.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_8 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_4, %add_2, 1, 8, 12), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-8) + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tmp7 = x1
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp0 >= tmp8
tmp11 = tmp0 < tmp1
tmp12 = tmp10 & tmp11
tmp13 = tmp12 & tmp9
tmp14 = tmp9 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tmp9 & tmp15
tmp17 = tmp0 < tmp8
tmp18 = tmp17 & tmp16
tmp19 = tmp9 & tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp20 & xmask, other=0.0)
tmp22 = 0.0
tmp23 = tmp22 + tmp21
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp17, tmp25, tmp22)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp19, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp28, tmp22)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp18, tmp29, tmp30)
tmp32 = tmp9 & tmp16
tmp33 = tmp17 & tmp32
tmp34 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp35 = tmp22 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp33, tmp35, tmp36)
tmp38 = tl.where(tmp17, tmp37, tmp22)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp9, tmp40, tmp22)
tmp42 = tl.where(tmp17, tmp31, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp16, tmp42, tmp43)
tmp45 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp46 = tmp22 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp18, tmp46, tmp47)
tmp49 = tl.where(tmp17, tmp48, tmp22)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp9, tmp51, tmp22)
tmp53 = tl.where(tmp9, tmp44, tmp52)
tmp54 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp55 = tmp53 + tmp54
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp15, tmp55, tmp56)
tmp58 = tmp9 & tmp14
tmp59 = tmp17 & tmp58
tmp60 = tmp9 & tmp59
tmp61 = tmp17 & tmp60
tmp62 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp61 & xmask, other=0.0)
tmp63 = tmp22 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp61, tmp63, tmp64)
tmp66 = tl.where(tmp17, tmp65, tmp22)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp60, tmp66, tmp67)
tmp69 = tl.where(tmp9, tmp68, tmp22)
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp59, tmp69, tmp70)
tmp72 = tmp9 & tmp58
tmp73 = tmp17 & tmp72
tmp74 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp73 & xmask, other=0.0)
tmp75 = tmp22 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp73, tmp75, tmp76)
tmp78 = tl.where(tmp17, tmp77, tmp22)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp72, tmp78, tmp79)
tmp81 = tl.where(tmp9, tmp80, tmp22)
tmp82 = tl.where(tmp17, tmp71, tmp81)
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp58, tmp82, tmp83)
tmp85 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp59 & xmask, other=0.0)
tmp86 = tmp22 + tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp59, tmp86, tmp87)
tmp89 = tl.where(tmp17, tmp88, tmp22)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp58, tmp89, tmp90)
tmp92 = tl.where(tmp9, tmp91, tmp22)
tmp93 = tl.where(tmp9, tmp84, tmp92)
tmp94 = tl.where(tmp12, tmp57, tmp93)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp14, tmp94, tmp95)
tmp97 = tmp17 & tmp14
tmp98 = tmp9 & tmp97
tmp99 = tmp17 & tmp98
tmp100 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp99 & xmask, other=0.0)
tmp101 = tmp22 + tmp100
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp99, tmp101, tmp102)
tmp104 = tl.where(tmp17, tmp103, tmp22)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp98, tmp104, tmp105)
tmp107 = tl.where(tmp9, tmp106, tmp22)
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp97, tmp107, tmp108)
tmp110 = tl.where(tmp17, tmp109, tmp92)
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp14, tmp110, tmp111)
tmp113 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp97 & xmask, other=0.0)
tmp114 = tmp22 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp97, tmp114, tmp115)
tmp117 = tl.where(tmp17, tmp116, tmp22)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp14, tmp117, tmp118)
tmp120 = tl.where(tmp9, tmp119, tmp22)
tmp121 = tl.where(tmp9, tmp112, tmp120)
tmp122 = tl.where(tmp9, tmp96, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp13, tmp122, tmp123)
tmp125 = tmp9 & tmp9
tmp126 = tmp12 & tmp125
tmp127 = tmp9 & tmp126
tmp128 = tmp17 & tmp127
tmp129 = tmp9 & tmp128
tmp130 = tmp17 & tmp129
tmp131 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp130 & xmask, other=0.0)
tmp132 = tmp22 + tmp131
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp130, tmp132, tmp133)
tmp135 = tl.where(tmp17, tmp134, tmp22)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp129, tmp135, tmp136)
tmp138 = tl.where(tmp9, tmp137, tmp22)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp128, tmp138, tmp139)
tmp141 = tmp9 & tmp127
tmp142 = tmp17 & tmp141
tmp143 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp142 & xmask, other=0.0)
tmp144 = tmp22 + tmp143
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp142, tmp144, tmp145)
tmp147 = tl.where(tmp17, tmp146, tmp22)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp141, tmp147, tmp148)
tmp150 = tl.where(tmp9, tmp149, tmp22)
tmp151 = tl.where(tmp17, tmp140, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp127, tmp151, tmp152)
tmp154 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp128 & xmask, other=0.0)
tmp155 = tmp22 + tmp154
tmp156 = tl.full(tmp155.shape, 0.0, tmp155.dtype)
tmp157 = tl.where(tmp128, tmp155, tmp156)
tmp158 = tl.where(tmp17, tmp157, tmp22)
tmp159 = tl.full(tmp158.shape, 0.0, tmp158.dtype)
tmp160 = tl.where(tmp127, tmp158, tmp159)
tmp161 = tl.where(tmp9, tmp160, tmp22)
tmp162 = tl.where(tmp9, tmp153, tmp161)
tmp163 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp164 = tmp162 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp126, tmp164, tmp165)
tmp167 = tmp9 & tmp125
tmp168 = tmp17 & tmp167
tmp169 = tmp9 & tmp168
tmp170 = tmp17 & tmp169
tmp171 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp170 & xmask, other=0.0)
tmp172 = tmp22 + tmp171
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp170, tmp172, tmp173)
tmp175 = tl.where(tmp17, tmp174, tmp22)
tmp176 = tl.full(tmp175.shape, 0.0, tmp175.dtype)
tmp177 = tl.where(tmp169, tmp175, tmp176)
tmp178 = tl.where(tmp9, tmp177, tmp22)
tmp179 = tl.full(tmp178.shape, 0.0, tmp178.dtype)
tmp180 = tl.where(tmp168, tmp178, tmp179)
tmp181 = tmp9 & tmp167
tmp182 = tmp17 & tmp181
tmp183 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp182 & xmask, other=0.0)
tmp184 = tmp22 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp182, tmp184, tmp185)
tmp187 = tl.where(tmp17, tmp186, tmp22)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp181, tmp187, tmp188)
tmp190 = tl.where(tmp9, tmp189, tmp22)
tmp191 = tl.where(tmp17, tmp180, tmp190)
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp167, tmp191, tmp192)
tmp194 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp168 & xmask, other=0.0)
tmp195 = tmp22 + tmp194
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp168, tmp195, tmp196)
tmp198 = tl.where(tmp17, tmp197, tmp22)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp167, tmp198, tmp199)
tmp201 = tl.where(tmp9, tmp200, tmp22)
tmp202 = tl.where(tmp9, tmp193, tmp201)
tmp203 = tl.where(tmp12, tmp166, tmp202)
tmp204 = tl.full(tmp203.shape, 0.0, tmp203.dtype)
tmp205 = tl.where(tmp125, tmp203, tmp204)
tmp206 = tmp17 & tmp125
tmp207 = tmp9 & tmp206
tmp208 = tmp17 & tmp207
tmp209 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp208 & xmask, other=0.0)
tmp210 = tmp22 + tmp209
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp208, tmp210, tmp211)
tmp213 = tl.where(tmp17, tmp212, tmp22)
tmp214 = tl.full(tmp213.shape, 0.0, tmp213.dtype)
tmp215 = tl.where(tmp207, tmp213, tmp214)
tmp216 = tl.where(tmp9, tmp215, tmp22)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp206, tmp216, tmp217)
tmp219 = tl.where(tmp17, tmp218, tmp201)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp125, tmp219, tmp220)
tmp222 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp206 & xmask, other=0.0)
tmp223 = tmp22 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp206, tmp223, tmp224)
tmp226 = tl.where(tmp17, tmp225, tmp22)
tmp227 = tl.full(tmp226.shape, 0.0, tmp226.dtype)
tmp228 = tl.where(tmp125, tmp226, tmp227)
tmp229 = tl.where(tmp9, tmp228, tmp22)
tmp230 = tl.where(tmp9, tmp221, tmp229)
tmp231 = tl.where(tmp9, tmp205, tmp230)
tmp232 = tl.where(tmp12, tmp124, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp236 = tmp121 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp13, tmp236, tmp237)
tmp239 = tl.where(tmp12, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp9, tmp239, tmp240)
tmp242 = tmp17 & tmp9
tmp243 = tmp9 & tmp242
tmp244 = tmp17 & tmp243
tmp245 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp244 & xmask, other=0.0)
tmp246 = tmp22 + tmp245
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp244, tmp246, tmp247)
tmp249 = tl.where(tmp17, tmp248, tmp22)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp243, tmp249, tmp250)
tmp252 = tl.where(tmp9, tmp251, tmp22)
tmp253 = tl.full(tmp252.shape, 0.0, tmp252.dtype)
tmp254 = tl.where(tmp242, tmp252, tmp253)
tmp255 = tl.where(tmp17, tmp254, tmp229)
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp9, tmp255, tmp256)
tmp258 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp242 & xmask, other=0.0)
tmp259 = tmp22 + tmp258
tmp260 = tl.full(tmp259.shape, 0.0, tmp259.dtype)
tmp261 = tl.where(tmp242, tmp259, tmp260)
tmp262 = tl.where(tmp17, tmp261, tmp22)
tmp263 = tl.full(tmp262.shape, 0.0, tmp262.dtype)
tmp264 = tl.where(tmp9, tmp262, tmp263)
tmp265 = tl.where(tmp9, tmp264, tmp22)
tmp266 = tl.where(tmp9, tmp257, tmp265)
tmp267 = tl.where(tmp9, tmp241, tmp266)
tmp268 = tl.where(tmp9, tmp234, tmp267)
tmp269 = tl.where(tmp5, tmp6, tmp268)
tl.store(out_ptr0 + (x2), tmp269, xmask)
''', device_str='cuda')
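# Per the graph fragment above, this kernel materializes
# slice_scatter_default_8: lanes whose column index x0 falls in [8, 12) take
# the freshly computed add_2 values from in_ptr0, while all other lanes
# recompute the prior buffer contents from in_ptr1 via the same where-chain
# scheme as the previous kernel.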
# kernel path: runs/run_shard_0/inductor_cache/rs/crsismazcidlj2wh2jvwydvqnpv6f4wjc5xse4fhqmjr6y73kkqp.py
# Topologically Sorted Source Nodes: [output, iadd, iadd_1, iadd_3, iadd_4], Original ATen: [aten.zeros, aten.add]
# Source node to ATen node mapping:
# iadd => add
# iadd_1 => add_1
# iadd_3 => add_3
# iadd_4 => add_4
# output => full
# Graph fragment:
# %full : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([16, 16], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %select_1), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 1, 0, 4), kwargs = {})
# %slice_scatter_default_1 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %slice_scatter_default, 0, 0, 4), kwargs = {})
# %slice_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %slice_7, 1, 0, 4), kwargs = {})
# %slice_scatter_default_3 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_scatter_default_2, 0, 0, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_20, %select_3), kwargs = {})
# %slice_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_2, %add_1, 1, 4, 8), kwargs = {})
# %slice_scatter_default_5 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %slice_scatter_default_4, 0, 0, 4), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_3, %slice_23, 1, 4, 8), kwargs = {})
# %slice_scatter_default_7 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %slice_scatter_default_6, 0, 0, 4), kwargs = {})
# %slice_scatter_default_9 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_scatter_default_8, 0, 0, 4), kwargs = {})
# %slice_scatter_default_10 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_5, %slice_39, 1, 8, 12), kwargs = {})
# %slice_scatter_default_11 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %slice_scatter_default_10, 0, 0, 4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_52, %select_7), kwargs = {})
# %slice_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_6, %add_3, 1, 12, 16), kwargs = {})
# %slice_scatter_default_13 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %slice_scatter_default_12, 0, 0, 4), kwargs = {})
# %slice_scatter_default_14 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_7, %slice_55, 1, 12, 16), kwargs = {})
# %slice_scatter_default_15 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %slice_scatter_default_14, 0, 0, 4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_68, %select_9), kwargs = {})
# %slice_scatter_default_16 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_8, %add_4, 1, 0, 4), kwargs = {})
# %slice_scatter_default_17 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %slice_scatter_default_16, 0, 4, 8), kwargs = {})
triton_poi_fused_add_zeros_2 = async_compile.triton('triton_poi_fused_add_zeros_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_zeros_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_zeros_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tmp4 >= tmp1
tmp6 = tl.full([1], 8, tl.int64)
tmp7 = tmp4 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp8 & tmp10
tmp12 = tmp2 & tmp11
tmp13 = tmp4 < tmp1
tmp14 = tmp13 & tmp12
tmp15 = tmp2 & tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp18 = 0.0
tmp19 = tmp18 + tmp17
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp16, tmp19, tmp20)
tmp22 = tl.where(tmp13, tmp21, tmp18)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp2, tmp24, tmp18)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tmp2 & tmp12
tmp29 = tmp13 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp29 & xmask, other=0.0)
tmp31 = tmp18 + tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tl.where(tmp13, tmp33, tmp18)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp2, tmp36, tmp18)
tmp38 = tl.where(tmp13, tmp27, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp12, tmp38, tmp39)
tmp41 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp14 & xmask, other=0.0)
tmp42 = tmp18 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp14, tmp42, tmp43)
tmp45 = tl.where(tmp13, tmp44, tmp18)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp12, tmp45, tmp46)
tmp48 = tl.where(tmp2, tmp47, tmp18)
tmp49 = tl.where(tmp2, tmp40, tmp48)
tmp50 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp11, tmp51, tmp52)
tmp54 = tmp2 & tmp10
tmp55 = tmp13 & tmp54
tmp56 = tmp2 & tmp55
tmp57 = tmp13 & tmp56
tmp58 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp57 & xmask, other=0.0)
tmp59 = tmp18 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp57, tmp59, tmp60)
tmp62 = tl.where(tmp13, tmp61, tmp18)
tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
tmp64 = tl.where(tmp56, tmp62, tmp63)
tmp65 = tl.where(tmp2, tmp64, tmp18)
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp55, tmp65, tmp66)
tmp68 = tmp2 & tmp54
tmp69 = tmp13 & tmp68
tmp70 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp69 & xmask, other=0.0)
tmp71 = tmp18 + tmp70
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp69, tmp71, tmp72)
tmp74 = tl.where(tmp13, tmp73, tmp18)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp68, tmp74, tmp75)
tmp77 = tl.where(tmp2, tmp76, tmp18)
tmp78 = tl.where(tmp13, tmp67, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp55 & xmask, other=0.0)
tmp82 = tmp18 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp55, tmp82, tmp83)
tmp85 = tl.where(tmp13, tmp84, tmp18)
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp54, tmp85, tmp86)
tmp88 = tl.where(tmp2, tmp87, tmp18)
tmp89 = tl.where(tmp2, tmp80, tmp88)
tmp90 = tl.where(tmp8, tmp53, tmp89)
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp10, tmp90, tmp91)
tmp93 = tmp13 & tmp10
tmp94 = tmp2 & tmp93
tmp95 = tmp13 & tmp94
tmp96 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp95 & xmask, other=0.0)
tmp97 = tmp18 + tmp96
tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype)
tmp99 = tl.where(tmp95, tmp97, tmp98)
tmp100 = tl.where(tmp13, tmp99, tmp18)
tmp101 = tl.full(tmp100.shape, 0.0, tmp100.dtype)
tmp102 = tl.where(tmp94, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp102, tmp18)
tmp104 = tl.full(tmp103.shape, 0.0, tmp103.dtype)
tmp105 = tl.where(tmp93, tmp103, tmp104)
tmp106 = tl.where(tmp13, tmp105, tmp88)
tmp107 = tl.full(tmp106.shape, 0.0, tmp106.dtype)
tmp108 = tl.where(tmp10, tmp106, tmp107)
tmp109 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp93 & xmask, other=0.0)
tmp110 = tmp18 + tmp109
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp93, tmp110, tmp111)
tmp113 = tl.where(tmp13, tmp112, tmp18)
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp10, tmp113, tmp114)
tmp116 = tl.where(tmp2, tmp115, tmp18)
tmp117 = tl.where(tmp2, tmp108, tmp116)
tmp118 = tl.where(tmp2, tmp92, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp9, tmp118, tmp119)
tmp121 = tmp2 & tmp2
tmp122 = tmp8 & tmp121
tmp123 = tmp2 & tmp122
tmp124 = tmp13 & tmp123
tmp125 = tmp2 & tmp124
tmp126 = tmp13 & tmp125
tmp127 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp128 = tmp18 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp126, tmp128, tmp129)
tmp131 = tl.where(tmp13, tmp130, tmp18)
tmp132 = tl.full(tmp131.shape, 0.0, tmp131.dtype)
tmp133 = tl.where(tmp125, tmp131, tmp132)
tmp134 = tl.where(tmp2, tmp133, tmp18)
tmp135 = tl.full(tmp134.shape, 0.0, tmp134.dtype)
tmp136 = tl.where(tmp124, tmp134, tmp135)
tmp137 = tmp2 & tmp123
tmp138 = tmp13 & tmp137
tmp139 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp138 & xmask, other=0.0)
tmp140 = tmp18 + tmp139
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp138, tmp140, tmp141)
tmp143 = tl.where(tmp13, tmp142, tmp18)
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp137, tmp143, tmp144)
tmp146 = tl.where(tmp2, tmp145, tmp18)
tmp147 = tl.where(tmp13, tmp136, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp123, tmp147, tmp148)
tmp150 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp124 & xmask, other=0.0)
tmp151 = tmp18 + tmp150
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp124, tmp151, tmp152)
tmp154 = tl.where(tmp13, tmp153, tmp18)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp123, tmp154, tmp155)
tmp157 = tl.where(tmp2, tmp156, tmp18)
tmp158 = tl.where(tmp2, tmp149, tmp157)
tmp159 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp122 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp122, tmp160, tmp161)
tmp163 = tmp2 & tmp121
tmp164 = tmp13 & tmp163
tmp165 = tmp2 & tmp164
tmp166 = tmp13 & tmp165
tmp167 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp166 & xmask, other=0.0)
tmp168 = tmp18 + tmp167
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp166, tmp168, tmp169)
tmp171 = tl.where(tmp13, tmp170, tmp18)
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp165, tmp171, tmp172)
tmp174 = tl.where(tmp2, tmp173, tmp18)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp164, tmp174, tmp175)
tmp177 = tmp2 & tmp163
tmp178 = tmp13 & tmp177
tmp179 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp178 & xmask, other=0.0)
tmp180 = tmp18 + tmp179
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp178, tmp180, tmp181)
tmp183 = tl.where(tmp13, tmp182, tmp18)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp177, tmp183, tmp184)
tmp186 = tl.where(tmp2, tmp185, tmp18)
tmp187 = tl.where(tmp13, tmp176, tmp186)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp163, tmp187, tmp188)
tmp190 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp164 & xmask, other=0.0)
tmp191 = tmp18 + tmp190
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp164, tmp191, tmp192)
tmp194 = tl.where(tmp13, tmp193, tmp18)
tmp195 = tl.full(tmp194.shape, 0.0, tmp194.dtype)
tmp196 = tl.where(tmp163, tmp194, tmp195)
tmp197 = tl.where(tmp2, tmp196, tmp18)
tmp198 = tl.where(tmp2, tmp189, tmp197)
tmp199 = tl.where(tmp8, tmp162, tmp198)
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp121, tmp199, tmp200)
tmp202 = tmp13 & tmp121
tmp203 = tmp2 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp204 & xmask, other=0.0)
tmp206 = tmp18 + tmp205
tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype)
tmp208 = tl.where(tmp204, tmp206, tmp207)
tmp209 = tl.where(tmp13, tmp208, tmp18)
tmp210 = tl.full(tmp209.shape, 0.0, tmp209.dtype)
tmp211 = tl.where(tmp203, tmp209, tmp210)
tmp212 = tl.where(tmp2, tmp211, tmp18)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp202, tmp212, tmp213)
tmp215 = tl.where(tmp13, tmp214, tmp197)
tmp216 = tl.full(tmp215.shape, 0.0, tmp215.dtype)
tmp217 = tl.where(tmp121, tmp215, tmp216)
tmp218 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp202 & xmask, other=0.0)
tmp219 = tmp18 + tmp218
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp202, tmp219, tmp220)
tmp222 = tl.where(tmp13, tmp221, tmp18)
tmp223 = tl.full(tmp222.shape, 0.0, tmp222.dtype)
tmp224 = tl.where(tmp121, tmp222, tmp223)
tmp225 = tl.where(tmp2, tmp224, tmp18)
tmp226 = tl.where(tmp2, tmp217, tmp225)
tmp227 = tl.where(tmp2, tmp201, tmp226)
tmp228 = tl.where(tmp8, tmp120, tmp227)
tmp229 = tl.full(tmp228.shape, 0.0, tmp228.dtype)
tmp230 = tl.where(tmp2, tmp228, tmp229)
tmp231 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp232 = tmp117 + tmp231
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.where(tmp8, tmp234, tmp226)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp2, tmp235, tmp236)
tmp238 = tmp13 & tmp2
tmp239 = tmp2 & tmp238
tmp240 = tmp13 & tmp239
tmp241 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp240 & xmask, other=0.0)
tmp242 = tmp18 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp13, tmp244, tmp18)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp2, tmp247, tmp18)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp13, tmp250, tmp225)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp2, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp238 & xmask, other=0.0)
tmp255 = tmp18 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp13, tmp257, tmp18)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp2, tmp258, tmp259)
tmp261 = tl.where(tmp2, tmp260, tmp18)
tmp262 = tl.where(tmp2, tmp253, tmp261)
tmp263 = tl.where(tmp2, tmp237, tmp262)
tmp264 = tl.where(tmp2, tmp230, tmp263)
tmp265 = tl.where(tmp2, tmp3, tmp264)
tmp266 = tmp0 >= tmp1
tmp267 = tmp0 < tmp6
tmp268 = tmp266 & tmp267
tmp269 = tmp13 & tmp268
tmp270 = tmp2 & tmp269
tmp271 = tl.full([1], 12, tl.int64)
tmp272 = tmp4 >= tmp271
tmp273 = tmp272 & tmp270
tmp274 = tmp2 & tmp273
tmp275 = tmp272 & tmp274
tmp276 = tmp2 & tmp275
tmp277 = tmp4 >= tmp6
tmp278 = tmp4 < tmp271
tmp279 = tmp277 & tmp278
tmp280 = tmp279 & tmp276
tmp281 = tl.where(tmp279, tmp265, tmp265)
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp276, tmp281, tmp282)
tmp284 = tl.where(tmp2, tmp283, tmp265)
tmp285 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp275 & xmask, other=0.0)
tmp286 = tmp284 + tmp285
tmp287 = tl.full(tmp286.shape, 0.0, tmp286.dtype)
tmp288 = tl.where(tmp275, tmp286, tmp287)
tmp289 = tmp2 & tmp274
tmp290 = tmp279 & tmp289
tmp291 = tl.where(tmp289, tmp281, tmp282)
tmp292 = tl.where(tmp2, tmp291, tmp265)
tmp293 = tl.where(tmp272, tmp288, tmp292)
tmp294 = tl.full(tmp293.shape, 0.0, tmp293.dtype)
tmp295 = tl.where(tmp274, tmp293, tmp294)
tmp296 = tmp279 & tmp274
tmp297 = tl.where(tmp274, tmp281, tmp282)
tmp298 = tl.where(tmp2, tmp297, tmp265)
tmp299 = tl.where(tmp2, tmp295, tmp298)
tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype)
tmp301 = tl.where(tmp273, tmp299, tmp300)
tmp302 = tmp2 & tmp270
tmp303 = tmp272 & tmp302
tmp304 = tmp2 & tmp303
tmp305 = tmp279 & tmp304
tmp306 = tl.where(tmp304, tmp281, tmp282)
tmp307 = tl.where(tmp2, tmp306, tmp265)
tmp308 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp303 & xmask, other=0.0)
tmp309 = tmp307 + tmp308
tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype)
tmp311 = tl.where(tmp303, tmp309, tmp310)
tmp312 = tmp2 & tmp302
tmp313 = tmp279 & tmp312
tmp314 = tl.where(tmp312, tmp281, tmp282)
tmp315 = tl.where(tmp2, tmp314, tmp265)
tmp316 = tl.where(tmp272, tmp311, tmp315)
tmp317 = tl.full(tmp316.shape, 0.0, tmp316.dtype)
tmp318 = tl.where(tmp302, tmp316, tmp317)
tmp319 = tmp279 & tmp302
tmp320 = tl.where(tmp302, tmp281, tmp282)
tmp321 = tl.where(tmp2, tmp320, tmp265)
tmp322 = tl.where(tmp2, tmp318, tmp321)
tmp323 = tl.where(tmp272, tmp301, tmp322)
tmp324 = tl.full(tmp323.shape, 0.0, tmp323.dtype)
tmp325 = tl.where(tmp270, tmp323, tmp324)
tmp326 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp273 & xmask, other=0.0)
tmp327 = tmp298 + tmp326
tmp328 = tl.full(tmp327.shape, 0.0, tmp327.dtype)
tmp329 = tl.where(tmp273, tmp327, tmp328)
tmp330 = tl.where(tmp272, tmp329, tmp321)
tmp331 = tl.full(tmp330.shape, 0.0, tmp330.dtype)
tmp332 = tl.where(tmp270, tmp330, tmp331)
tmp333 = tmp279 & tmp270
tmp334 = tl.where(tmp270, tmp281, tmp282)
tmp335 = tl.where(tmp2, tmp334, tmp265)
tmp336 = tl.where(tmp2, tmp332, tmp335)
tmp337 = tl.where(tmp2, tmp325, tmp336)
tmp338 = tl.load(in_ptr1 + (48 + x0 + (4*x1)), tmp269 & xmask, other=0.0)
tmp339 = tmp337 + tmp338
tmp340 = tl.full(tmp339.shape, 0.0, tmp339.dtype)
tmp341 = tl.where(tmp269, tmp339, tmp340)
tmp342 = tmp2 & tmp268
tmp343 = tmp272 & tmp342
tmp344 = tmp2 & tmp343
tmp345 = tmp272 & tmp344
tmp346 = tmp2 & tmp345
tmp347 = tmp279 & tmp346
tmp348 = tl.where(tmp346, tmp281, tmp282)
tmp349 = tl.where(tmp2, tmp348, tmp265)
tmp350 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp345 & xmask, other=0.0)
tmp351 = tmp349 + tmp350
tmp352 = tl.full(tmp351.shape, 0.0, tmp351.dtype)
tmp353 = tl.where(tmp345, tmp351, tmp352)
tmp354 = tmp2 & tmp344
tmp355 = tmp279 & tmp354
tmp356 = tl.where(tmp354, tmp281, tmp282)
tmp357 = tl.where(tmp2, tmp356, tmp265)
tmp358 = tl.where(tmp272, tmp353, tmp357)
tmp359 = tl.full(tmp358.shape, 0.0, tmp358.dtype)
tmp360 = tl.where(tmp344, tmp358, tmp359)
tmp361 = tmp279 & tmp344
tmp362 = tl.where(tmp344, tmp281, tmp282)
tmp363 = tl.where(tmp2, tmp362, tmp265)
tmp364 = tl.where(tmp2, tmp360, tmp363)
tmp365 = tl.full(tmp364.shape, 0.0, tmp364.dtype)
tmp366 = tl.where(tmp343, tmp364, tmp365)
tmp367 = tmp2 & tmp342
tmp368 = tmp272 & tmp367
tmp369 = tmp2 & tmp368
tmp370 = tmp279 & tmp369
tmp371 = tl.where(tmp369, tmp281, tmp282)
tmp372 = tl.where(tmp2, tmp371, tmp265)
tmp373 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp368 & xmask, other=0.0)
tmp374 = tmp372 + tmp373
tmp375 = tl.full(tmp374.shape, 0.0, tmp374.dtype)
tmp376 = tl.where(tmp368, tmp374, tmp375)
tmp377 = tmp2 & tmp367
tmp378 = tmp279 & tmp377
tmp379 = tl.where(tmp377, tmp281, tmp282)
tmp380 = tl.where(tmp2, tmp379, tmp265)
tmp381 = tl.where(tmp272, tmp376, tmp380)
tmp382 = tl.full(tmp381.shape, 0.0, tmp381.dtype)
tmp383 = tl.where(tmp367, tmp381, tmp382)
tmp384 = tmp279 & tmp367
tmp385 = tl.where(tmp367, tmp281, tmp282)
tmp386 = tl.where(tmp2, tmp385, tmp265)
tmp387 = tl.where(tmp2, tmp383, tmp386)
tmp388 = tl.where(tmp272, tmp366, tmp387)
tmp389 = tl.full(tmp388.shape, 0.0, tmp388.dtype)
tmp390 = tl.where(tmp342, tmp388, tmp389)
tmp391 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp343 & xmask, other=0.0)
tmp392 = tmp363 + tmp391
tmp393 = tl.full(tmp392.shape, 0.0, tmp392.dtype)
tmp394 = tl.where(tmp343, tmp392, tmp393)
tmp395 = tl.where(tmp272, tmp394, tmp386)
tmp396 = tl.full(tmp395.shape, 0.0, tmp395.dtype)
tmp397 = tl.where(tmp342, tmp395, tmp396)
tmp398 = tmp279 & tmp342
tmp399 = tl.where(tmp342, tmp281, tmp282)
tmp400 = tl.where(tmp2, tmp399, tmp265)
tmp401 = tl.where(tmp2, tmp397, tmp400)
tmp402 = tl.where(tmp2, tmp390, tmp401)
tmp403 = tl.where(tmp13, tmp341, tmp402)
tmp404 = tl.full(tmp403.shape, 0.0, tmp403.dtype)
tmp405 = tl.where(tmp268, tmp403, tmp404)
tmp406 = tmp272 & tmp2
tmp407 = tmp2 & tmp406
tmp408 = tmp272 & tmp407
tmp409 = tmp2 & tmp408
tmp410 = tmp279 & tmp409
tmp411 = tl.where(tmp409, tmp281, tmp282)
tmp412 = tl.where(tmp2, tmp411, tmp265)
tmp413 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp408 & xmask, other=0.0)
tmp414 = tmp412 + tmp413
tmp415 = tl.full(tmp414.shape, 0.0, tmp414.dtype)
tmp416 = tl.where(tmp408, tmp414, tmp415)
tmp417 = tmp2 & tmp407
tmp418 = tmp279 & tmp417
tmp419 = tl.where(tmp417, tmp281, tmp282)
tmp420 = tl.where(tmp2, tmp419, tmp265)
tmp421 = tl.where(tmp272, tmp416, tmp420)
tmp422 = tl.full(tmp421.shape, 0.0, tmp421.dtype)
tmp423 = tl.where(tmp407, tmp421, tmp422)
tmp424 = tmp279 & tmp407
tmp425 = tl.where(tmp407, tmp281, tmp282)
tmp426 = tl.where(tmp2, tmp425, tmp265)
tmp427 = tl.where(tmp2, tmp423, tmp426)
tmp428 = tl.full(tmp427.shape, 0.0, tmp427.dtype)
tmp429 = tl.where(tmp406, tmp427, tmp428)
tmp430 = tmp272 & tmp121
tmp431 = tmp2 & tmp430
tmp432 = tmp279 & tmp431
tmp433 = tl.where(tmp431, tmp281, tmp282)
tmp434 = tl.where(tmp2, tmp433, tmp265)
tmp435 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp430 & xmask, other=0.0)
tmp436 = tmp434 + tmp435
tmp437 = tl.full(tmp436.shape, 0.0, tmp436.dtype)
tmp438 = tl.where(tmp430, tmp436, tmp437)
tmp439 = tmp279 & tmp163
tmp440 = tl.where(tmp163, tmp281, tmp282)
tmp441 = tl.where(tmp2, tmp440, tmp265)
tmp442 = tl.where(tmp272, tmp438, tmp441)
tmp443 = tl.full(tmp442.shape, 0.0, tmp442.dtype)
tmp444 = tl.where(tmp121, tmp442, tmp443)
tmp445 = tmp279 & tmp121
tmp446 = tl.where(tmp121, tmp281, tmp282)
tmp447 = tl.where(tmp2, tmp446, tmp265)
tmp448 = tl.where(tmp2, tmp444, tmp447)
tmp449 = tl.where(tmp272, tmp429, tmp448)
tmp450 = tl.full(tmp449.shape, 0.0, tmp449.dtype)
tmp451 = tl.where(tmp2, tmp449, tmp450)
tmp452 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp406 & xmask, other=0.0)
tmp453 = tmp426 + tmp452
tmp454 = tl.full(tmp453.shape, 0.0, tmp453.dtype)
tmp455 = tl.where(tmp406, tmp453, tmp454)
tmp456 = tl.where(tmp272, tmp455, tmp447)
tmp457 = tl.full(tmp456.shape, 0.0, tmp456.dtype)
tmp458 = tl.where(tmp2, tmp456, tmp457)
tmp459 = tmp279 & tmp2
tmp460 = tl.where(tmp2, tmp281, tmp282)
tmp461 = tl.where(tmp2, tmp460, tmp265)
tmp462 = tl.where(tmp2, tmp458, tmp461)
tmp463 = tl.where(tmp2, tmp451, tmp462)
tmp464 = tl.where(tmp268, tmp405, tmp463)
tl.store(in_out_ptr0 + (x2), tmp464, xmask)
''', device_str='cuda')
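# Per the long graph fragment above, this kernel folds the initial
# `aten.full` zeros buffer and a run of slice_scatter/add steps (iadd,
# iadd_1, iadd_3, iadd_4) into one pass over the 16x16 output: row-range and
# column-range predicates (tmp2, tmp8, tmp268, ...) select which scattered
# region each lane belongs to, and the matching where-chain rebuilds its
# value in place through in_out_ptr0.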
# kernel path: runs/run_shard_0/inductor_cache/6u/c6ueeomzrz3xolldfp7disiqma6bv2ommvisq6xyzxkbax272luc.py
# Topologically Sorted Source Nodes: [iadd_6], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_100, %select_13), kwargs = {})
# %slice_scatter_default_24 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_12, %add_6, 1, 8, 12), kwargs = {})
# %slice_scatter_default_26 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_13, %slice_103, 1, 8, 12), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 43, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp200 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 4 + x1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp6 >= tmp7
tmp9 = tmp6 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp5
tmp12 = tmp0 >= tmp7
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp11
tmp16 = tmp10 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp10 & tmp17
tmp19 = tmp0 < tmp7
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr0 + (64 + x2), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr0 + (64 + x2), tmp18 & xmask, other=0.0)
tmp23 = tl.where(tmp19, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp18, tmp23, tmp24)
tmp26 = tl.load(in_ptr0 + (64 + x2), tmp17 & xmask, other=0.0)
tmp27 = tl.where(tmp10, tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp29 = tmp27 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp17, tmp29, tmp30)
tmp32 = tmp10 & tmp16
tmp33 = tmp19 & tmp32
tmp34 = tl.load(in_ptr0 + (64 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr0 + (64 + x2), tmp32 & xmask, other=0.0)
tmp36 = tl.where(tmp19, tmp34, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp32, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (64 + x2), tmp16 & xmask, other=0.0)
tmp40 = tl.where(tmp10, tmp38, tmp39)
tmp41 = tl.where(tmp14, tmp31, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp16, tmp41, tmp42)
tmp44 = tmp19 & tmp16
tmp45 = tl.load(in_ptr0 + (64 + x2), tmp44 & xmask, other=0.0)
tmp46 = tl.where(tmp19, tmp45, tmp39)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp16, tmp46, tmp47)
tmp49 = tl.load(in_ptr0 + (64 + x2), tmp15 & xmask, other=0.0)
tmp50 = tl.where(tmp10, tmp48, tmp49)
tmp51 = tl.where(tmp10, tmp43, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp15, tmp51, tmp52)
tmp54 = tmp10 & tmp11
tmp55 = tmp14 & tmp54
tmp56 = tmp10 & tmp55
tmp57 = tmp19 & tmp56
tmp58 = tl.load(in_ptr0 + (64 + x2), tmp57 & xmask, other=0.0)
tmp59 = tl.load(in_ptr0 + (64 + x2), tmp56 & xmask, other=0.0)
tmp60 = tl.where(tmp19, tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.load(in_ptr0 + (64 + x2), tmp55 & xmask, other=0.0)
tmp64 = tl.where(tmp10, tmp62, tmp63)
tmp65 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp55 & xmask, other=0.0)
tmp66 = tmp64 + tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp55, tmp66, tmp67)
tmp69 = tmp10 & tmp54
tmp70 = tmp19 & tmp69
tmp71 = tl.load(in_ptr0 + (64 + x2), tmp70 & xmask, other=0.0)
tmp72 = tl.load(in_ptr0 + (64 + x2), tmp69 & xmask, other=0.0)
tmp73 = tl.where(tmp19, tmp71, tmp72)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp69, tmp73, tmp74)
tmp76 = tl.load(in_ptr0 + (64 + x2), tmp54 & xmask, other=0.0)
tmp77 = tl.where(tmp10, tmp75, tmp76)
tmp78 = tl.where(tmp14, tmp68, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tmp19 & tmp54
tmp82 = tl.load(in_ptr0 + (64 + x2), tmp81 & xmask, other=0.0)
tmp83 = tl.where(tmp19, tmp82, tmp76)
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp54, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (64 + x2), tmp11 & xmask, other=0.0)
tmp87 = tl.where(tmp10, tmp85, tmp86)
tmp88 = tl.where(tmp10, tmp80, tmp87)
tmp89 = tl.where(tmp14, tmp53, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp11, tmp89, tmp90)
tmp92 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp93 = tmp50 + tmp92
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp15, tmp93, tmp94)
tmp96 = tl.where(tmp14, tmp95, tmp87)
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp11, tmp96, tmp97)
tmp99 = tmp19 & tmp11
tmp100 = tl.load(in_ptr0 + (64 + x2), tmp99 & xmask, other=0.0)
tmp101 = tl.where(tmp19, tmp100, tmp86)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp11, tmp101, tmp102)
tmp104 = tl.load(in_ptr0 + (64 + x2), tmp5 & xmask, other=0.0)
tmp105 = tl.where(tmp10, tmp103, tmp104)
tmp106 = tl.where(tmp10, tmp98, tmp105)
tmp107 = tl.where(tmp10, tmp91, tmp106)
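    # The load/add pair below realizes %add_6 itself, gated by tmp5
    # (columns 8 <= x0 < 12); the nested where-chains above appear to
    # re-derive the effect of the earlier slice/scatter adds lane by lane.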
tmp108 = tl.load(in_ptr1 + (88 + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tmp109 = tmp107 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp5, tmp109, tmp110)
tmp112 = tmp14 & tmp10
tmp113 = tmp10 & tmp112
tmp114 = tmp14 & tmp113
tmp115 = tmp10 & tmp114
tmp116 = tmp19 & tmp115
tmp117 = tl.load(in_ptr0 + (64 + x2), tmp116 & xmask, other=0.0)
tmp118 = tl.load(in_ptr0 + (64 + x2), tmp115 & xmask, other=0.0)
tmp119 = tl.where(tmp19, tmp117, tmp118)
tmp120 = tl.full(tmp119.shape, 0.0, tmp119.dtype)
tmp121 = tl.where(tmp115, tmp119, tmp120)
tmp122 = tl.load(in_ptr0 + (64 + x2), tmp114 & xmask, other=0.0)
tmp123 = tl.where(tmp10, tmp121, tmp122)
tmp124 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp114 & xmask, other=0.0)
tmp125 = tmp123 + tmp124
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp114, tmp125, tmp126)
tmp128 = tmp10 & tmp113
tmp129 = tmp19 & tmp128
tmp130 = tl.load(in_ptr0 + (64 + x2), tmp129 & xmask, other=0.0)
tmp131 = tl.load(in_ptr0 + (64 + x2), tmp128 & xmask, other=0.0)
tmp132 = tl.where(tmp19, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp128, tmp132, tmp133)
tmp135 = tl.load(in_ptr0 + (64 + x2), tmp113 & xmask, other=0.0)
tmp136 = tl.where(tmp10, tmp134, tmp135)
tmp137 = tl.where(tmp14, tmp127, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp113, tmp137, tmp138)
tmp140 = tmp19 & tmp113
tmp141 = tl.load(in_ptr0 + (64 + x2), tmp140 & xmask, other=0.0)
tmp142 = tl.where(tmp19, tmp141, tmp135)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp113, tmp142, tmp143)
tmp145 = tl.load(in_ptr0 + (64 + x2), tmp112 & xmask, other=0.0)
tmp146 = tl.where(tmp10, tmp144, tmp145)
tmp147 = tl.where(tmp10, tmp139, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp112, tmp147, tmp148)
tmp150 = tmp10 & tmp10
tmp151 = tmp14 & tmp150
tmp152 = tmp10 & tmp151
tmp153 = tmp19 & tmp152
tmp154 = tl.load(in_ptr0 + (64 + x2), tmp153 & xmask, other=0.0)
tmp155 = tl.load(in_ptr0 + (64 + x2), tmp152 & xmask, other=0.0)
tmp156 = tl.where(tmp19, tmp154, tmp155)
tmp157 = tl.full(tmp156.shape, 0.0, tmp156.dtype)
tmp158 = tl.where(tmp152, tmp156, tmp157)
tmp159 = tl.load(in_ptr0 + (64 + x2), tmp151 & xmask, other=0.0)
tmp160 = tl.where(tmp10, tmp158, tmp159)
tmp161 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp151 & xmask, other=0.0)
tmp162 = tmp160 + tmp161
tmp163 = tl.full(tmp162.shape, 0.0, tmp162.dtype)
tmp164 = tl.where(tmp151, tmp162, tmp163)
tmp165 = tmp10 & tmp150
tmp166 = tmp19 & tmp165
tmp167 = tl.load(in_ptr0 + (64 + x2), tmp166 & xmask, other=0.0)
tmp168 = tl.load(in_ptr0 + (64 + x2), tmp165 & xmask, other=0.0)
tmp169 = tl.where(tmp19, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp165, tmp169, tmp170)
tmp172 = tl.load(in_ptr0 + (64 + x2), tmp150 & xmask, other=0.0)
tmp173 = tl.where(tmp10, tmp171, tmp172)
tmp174 = tl.where(tmp14, tmp164, tmp173)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp150, tmp174, tmp175)
tmp177 = tmp19 & tmp150
tmp178 = tl.load(in_ptr0 + (64 + x2), tmp177 & xmask, other=0.0)
tmp179 = tl.where(tmp19, tmp178, tmp172)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp150, tmp179, tmp180)
tmp182 = tl.load(in_ptr0 + (64 + x2), tmp10 & xmask, other=0.0)
tmp183 = tl.where(tmp10, tmp181, tmp182)
tmp184 = tl.where(tmp10, tmp176, tmp183)
tmp185 = tl.where(tmp14, tmp149, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp10, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp112 & xmask, other=0.0)
tmp189 = tmp146 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp112, tmp189, tmp190)
tmp192 = tl.where(tmp14, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp10, tmp192, tmp193)
tmp195 = tmp19 & tmp10
tmp196 = tl.load(in_ptr0 + (64 + x2), tmp195 & xmask, other=0.0)
tmp197 = tl.where(tmp19, tmp196, tmp182)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp10, tmp197, tmp198)
tmp201 = tl.where(tmp10, tmp199, tmp200)
tmp202 = tl.where(tmp10, tmp194, tmp201)
tmp203 = tl.where(tmp10, tmp187, tmp202)
tmp204 = tl.where(tmp5, tmp111, tmp203)
tmp205 = tl.where(tmp10, tmp204, tmp107)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp5, tmp205, tmp206)
tmp208 = tl.where(tmp10, tmp204, tmp203)
tmp209 = tl.where(tmp5, tmp207, tmp208)
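    # Per the graph fragment, out_ptr0 appears to hold slice_scatter_default_24
    # (with the add result folded in) and out_ptr1 slice_scatter_default_26.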
tl.store(out_ptr0 + (x2), tmp204, xmask)
tl.store(out_ptr1 + (x2), tmp209, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/ft/cftz6fore27oifq7z2wzi27lwhb6xj2rstig24xl2zbr4ddn3vvx.py
# Topologically Sorted Source Nodes: [iadd_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_5 => add_5
# Graph fragment:
# %slice_scatter_default_18 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_9, %slice_71, 1, 0, 4), kwargs = {})
# %slice_scatter_default_19 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %slice_scatter_default_18, 0, 4, 8), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_84, %select_11), kwargs = {})
# %slice_scatter_default_20 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_10, %add_5, 1, 4, 8), kwargs = {})
# %slice_scatter_default_21 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %slice_scatter_default_20, 0, 4, 8), kwargs = {})
# %slice_scatter_default_22 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_11, %slice_87, 1, 4, 8), kwargs = {})
# %slice_scatter_default_23 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %slice_scatter_default_22, 0, 4, 8), kwargs = {})
# %slice_scatter_default_25 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %slice_scatter_default_24, 0, 4, 8), kwargs = {})
# %slice_scatter_default_27 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %slice_scatter_default_26, 0, 4, 8), kwargs = {})
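#
# Eager-mode sketch (illustrative names and shapes): the same row block gets an
# in-place add on its 4:8 column slice,
#
#     buf[4:8, 4:8] += addend          # add_5 = slice_84 + select_11
#
# with the chain of slice_scatter_default ops folding the result back into buf.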
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 23, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp101 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
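    # tmp5 gates rows 4 <= x1 < 8, the dim-0 range (4, 8) that every
    # slice_scatter_default in this fragment writes into; other rows keep
    # the value already in in_out_ptr0 (note mutated_arg_names above).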
tmp6 = tl.load(in_ptr0 + ((-64) + x2), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + ((-64) + x2), tmp5 & xmask, other=0.0)
tmp8 = x0
tmp9 = tmp8 >= tmp1
tmp10 = tmp8 < tmp3
tmp11 = tmp9 & tmp10
tmp12 = tmp11 & tmp5
tmp13 = tmp5 & tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp5 & tmp14
tmp16 = tmp8 < tmp1
tmp17 = tmp16 & tmp15
tmp18 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_out_ptr0 + (x2), tmp15 & xmask, other=0.0)
tmp20 = tl.where(tmp16, tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp15, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp24 = tl.where(tmp5, tmp22, tmp23)
tmp25 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp14 & xmask, other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp14, tmp26, tmp27)
tmp29 = tmp5 & tmp13
tmp30 = tmp16 & tmp29
tmp31 = tl.load(in_out_ptr0 + (x2), tmp30 & xmask, other=0.0)
tmp32 = tl.load(in_out_ptr0 + (x2), tmp29 & xmask, other=0.0)
tmp33 = tl.where(tmp16, tmp31, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp29, tmp33, tmp34)
tmp36 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp37 = tl.where(tmp5, tmp35, tmp36)
tmp38 = tl.where(tmp11, tmp28, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp13, tmp38, tmp39)
tmp41 = tmp16 & tmp13
tmp42 = tl.load(in_out_ptr0 + (x2), tmp41 & xmask, other=0.0)
tmp43 = tl.where(tmp16, tmp42, tmp36)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp13, tmp43, tmp44)
tmp46 = tl.load(in_out_ptr0 + (x2), tmp12 & xmask, other=0.0)
tmp47 = tl.where(tmp5, tmp45, tmp46)
tmp48 = tl.where(tmp5, tmp40, tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp12, tmp48, tmp49)
tmp51 = tmp5 & tmp5
tmp52 = tmp11 & tmp51
tmp53 = tmp5 & tmp52
tmp54 = tmp16 & tmp53
tmp55 = tl.load(in_out_ptr0 + (x2), tmp54 & xmask, other=0.0)
tmp56 = tl.load(in_out_ptr0 + (x2), tmp53 & xmask, other=0.0)
tmp57 = tl.where(tmp16, tmp55, tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp53, tmp57, tmp58)
tmp60 = tl.load(in_out_ptr0 + (x2), tmp52 & xmask, other=0.0)
tmp61 = tl.where(tmp5, tmp59, tmp60)
tmp62 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp52 & xmask, other=0.0)
tmp63 = tmp61 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp52, tmp63, tmp64)
tmp66 = tmp5 & tmp51
tmp67 = tmp16 & tmp66
tmp68 = tl.load(in_out_ptr0 + (x2), tmp67 & xmask, other=0.0)
tmp69 = tl.load(in_out_ptr0 + (x2), tmp66 & xmask, other=0.0)
tmp70 = tl.where(tmp16, tmp68, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp66, tmp70, tmp71)
tmp73 = tl.load(in_out_ptr0 + (x2), tmp51 & xmask, other=0.0)
tmp74 = tl.where(tmp5, tmp72, tmp73)
tmp75 = tl.where(tmp11, tmp65, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp51, tmp75, tmp76)
tmp78 = tmp16 & tmp51
tmp79 = tl.load(in_out_ptr0 + (x2), tmp78 & xmask, other=0.0)
tmp80 = tl.where(tmp16, tmp79, tmp73)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp51, tmp80, tmp81)
tmp83 = tl.load(in_out_ptr0 + (x2), tmp5 & xmask, other=0.0)
tmp84 = tl.where(tmp5, tmp82, tmp83)
tmp85 = tl.where(tmp5, tmp77, tmp84)
tmp86 = tl.where(tmp11, tmp50, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp5, tmp86, tmp87)
tmp89 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp12 & xmask, other=0.0)
tmp90 = tmp47 + tmp89
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp12, tmp90, tmp91)
tmp93 = tl.where(tmp11, tmp92, tmp84)
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp5, tmp93, tmp94)
tmp96 = tmp16 & tmp5
tmp97 = tl.load(in_out_ptr0 + (x2), tmp96 & xmask, other=0.0)
tmp98 = tl.where(tmp16, tmp97, tmp83)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp5, tmp98, tmp99)
tmp102 = tl.where(tmp5, tmp100, tmp101)
tmp103 = tl.where(tmp5, tmp95, tmp102)
tmp104 = tl.where(tmp5, tmp88, tmp103)
tmp105 = tl.where(tmp5, tmp7, tmp104)
tmp106 = tl.where(tmp5, tmp6, tmp105)
tl.store(in_out_ptr0 + (x2), tmp106, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/3f/c3fjcanlofb532fzzefseogkvlol76s72bgjgl7ny5uerbji6frd.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_34 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_17, %slice_135, 1, 0, 4), kwargs = {})
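#
# No source nodes map here: this kernel only materializes the staging copy for
# slice_scatter_default_34; in rough eager terms (illustrative names) it is
# `stage = buf[8:12].clone(); stage[:, 0:4] = updated_cols`.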
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 62, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp287 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp3 >= tmp1
tmp12 = tmp3 < tmp4
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp0 >= tmp6
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr0 + (128 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (128 + x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_ptr0 + (128 + x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_ptr0 + (128 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (128 + x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (128 + x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (128 + x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (128 + x0 + (4*x1)), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp9
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_ptr0 + (128 + x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_ptr0 + (128 + x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_ptr0 + (128 + x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_ptr0 + (128 + x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (128 + x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_ptr0 + (128 + x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (128 + x2), tmp9 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp2, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tmp13 & tmp2
tmp109 = tmp15 & tmp108
tmp110 = tmp13 & tmp109
tmp111 = tmp15 & tmp110
tmp112 = tl.load(in_ptr0 + (128 + x2), tmp111 & xmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp111 & xmask, other=0.0)
tmp114 = tmp112 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp111, tmp114, tmp115)
tmp117 = tl.load(in_ptr0 + (128 + x2), tmp110 & xmask, other=0.0)
tmp118 = tl.where(tmp15, tmp116, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp110, tmp118, tmp119)
tmp121 = tl.load(in_ptr0 + (128 + x2), tmp109 & xmask, other=0.0)
tmp122 = tl.where(tmp13, tmp120, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp109, tmp122, tmp123)
tmp125 = tmp13 & tmp108
tmp126 = tmp15 & tmp125
tmp127 = tl.load(in_ptr0 + (128 + x2), tmp126 & xmask, other=0.0)
tmp128 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp129 = tmp127 + tmp128
tmp130 = tl.full(tmp129.shape, 0.0, tmp129.dtype)
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.load(in_ptr0 + (128 + x2), tmp125 & xmask, other=0.0)
tmp133 = tl.where(tmp15, tmp131, tmp132)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp125, tmp133, tmp134)
tmp136 = tl.load(in_ptr0 + (128 + x2), tmp108 & xmask, other=0.0)
tmp137 = tl.where(tmp13, tmp135, tmp136)
tmp138 = tl.where(tmp15, tmp124, tmp137)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp108, tmp138, tmp139)
tmp141 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp142 = tmp121 + tmp141
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp109, tmp142, tmp143)
tmp145 = tl.where(tmp15, tmp144, tmp136)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp108, tmp145, tmp146)
tmp148 = tl.load(in_ptr0 + (128 + x2), tmp2 & xmask, other=0.0)
tmp149 = tl.where(tmp13, tmp147, tmp148)
tmp150 = tl.where(tmp13, tmp140, tmp149)
tmp151 = tl.where(tmp8, tmp107, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp2, tmp151, tmp152)
tmp154 = tmp2 & tmp8
tmp155 = tmp13 & tmp154
tmp156 = tmp15 & tmp155
tmp157 = tmp13 & tmp156
tmp158 = tmp15 & tmp157
tmp159 = tl.load(in_ptr0 + (128 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (128 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp15, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (128 + x2), tmp156 & xmask, other=0.0)
tmp169 = tl.where(tmp13, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp156, tmp169, tmp170)
tmp172 = tmp13 & tmp155
tmp173 = tmp15 & tmp172
tmp174 = tl.load(in_ptr0 + (128 + x2), tmp173 & xmask, other=0.0)
tmp175 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp173 & xmask, other=0.0)
tmp176 = tmp174 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp173, tmp176, tmp177)
tmp179 = tl.load(in_ptr0 + (128 + x2), tmp172 & xmask, other=0.0)
tmp180 = tl.where(tmp15, tmp178, tmp179)
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp172, tmp180, tmp181)
tmp183 = tl.load(in_ptr0 + (128 + x2), tmp155 & xmask, other=0.0)
tmp184 = tl.where(tmp13, tmp182, tmp183)
tmp185 = tl.where(tmp15, tmp171, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp155, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp156 & xmask, other=0.0)
tmp189 = tmp168 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp156, tmp189, tmp190)
tmp192 = tl.where(tmp15, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp155, tmp192, tmp193)
tmp195 = tl.load(in_ptr0 + (128 + x2), tmp154 & xmask, other=0.0)
tmp196 = tl.where(tmp13, tmp194, tmp195)
tmp197 = tl.where(tmp13, tmp187, tmp196)
tmp198 = tl.load(in_ptr1 + (128 + x0 + (4*x1)), tmp154 & xmask, other=0.0)
tmp199 = tmp197 + tmp198
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp154, tmp199, tmp200)
tmp202 = tmp13 & tmp8
tmp203 = tmp15 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tmp15 & tmp204
tmp206 = tl.load(in_ptr0 + (128 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (128 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp15, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (128 + x2), tmp203 & xmask, other=0.0)
tmp216 = tl.where(tmp13, tmp214, tmp215)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp203, tmp216, tmp217)
tmp219 = tmp13 & tmp202
tmp220 = tmp15 & tmp219
tmp221 = tl.load(in_ptr0 + (128 + x2), tmp220 & xmask, other=0.0)
tmp222 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp220 & xmask, other=0.0)
tmp223 = tmp221 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp220, tmp223, tmp224)
tmp226 = tl.load(in_ptr0 + (128 + x2), tmp219 & xmask, other=0.0)
tmp227 = tl.where(tmp15, tmp225, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp219, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (128 + x2), tmp202 & xmask, other=0.0)
tmp231 = tl.where(tmp13, tmp229, tmp230)
tmp232 = tl.where(tmp15, tmp218, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp202, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp203 & xmask, other=0.0)
tmp236 = tmp215 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp203, tmp236, tmp237)
tmp239 = tl.where(tmp15, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp202, tmp239, tmp240)
tmp242 = tl.load(in_ptr0 + (128 + x2), tmp8 & xmask, other=0.0)
tmp243 = tl.where(tmp13, tmp241, tmp242)
tmp244 = tl.where(tmp13, tmp234, tmp243)
tmp245 = tl.where(tmp2, tmp201, tmp244)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp8, tmp245, tmp246)
tmp248 = tmp15 & tmp13
tmp249 = tmp13 & tmp248
tmp250 = tmp15 & tmp249
tmp251 = tl.load(in_ptr0 + (128 + x2), tmp250 & xmask, other=0.0)
tmp252 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp250 & xmask, other=0.0)
tmp253 = tmp251 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp250, tmp253, tmp254)
tmp256 = tl.load(in_ptr0 + (128 + x2), tmp249 & xmask, other=0.0)
tmp257 = tl.where(tmp15, tmp255, tmp256)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp249, tmp257, tmp258)
tmp260 = tl.load(in_ptr0 + (128 + x2), tmp248 & xmask, other=0.0)
tmp261 = tl.where(tmp13, tmp259, tmp260)
tmp262 = tl.full(tmp261.shape, 0.0, tmp261.dtype)
tmp263 = tl.where(tmp248, tmp261, tmp262)
tmp264 = tmp13 & tmp13
tmp265 = tmp15 & tmp264
tmp266 = tl.load(in_ptr0 + (128 + x2), tmp265 & xmask, other=0.0)
tmp267 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp265 & xmask, other=0.0)
tmp268 = tmp266 + tmp267
tmp269 = tl.full(tmp268.shape, 0.0, tmp268.dtype)
tmp270 = tl.where(tmp265, tmp268, tmp269)
tmp271 = tl.load(in_ptr0 + (128 + x2), tmp264 & xmask, other=0.0)
tmp272 = tl.where(tmp15, tmp270, tmp271)
tmp273 = tl.full(tmp272.shape, 0.0, tmp272.dtype)
tmp274 = tl.where(tmp264, tmp272, tmp273)
tmp275 = tl.load(in_ptr0 + (128 + x2), tmp13 & xmask, other=0.0)
tmp276 = tl.where(tmp13, tmp274, tmp275)
tmp277 = tl.where(tmp15, tmp263, tmp276)
tmp278 = tl.full(tmp277.shape, 0.0, tmp277.dtype)
tmp279 = tl.where(tmp13, tmp277, tmp278)
tmp280 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp248 & xmask, other=0.0)
tmp281 = tmp260 + tmp280
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp248, tmp281, tmp282)
tmp284 = tl.where(tmp15, tmp283, tmp275)
tmp285 = tl.full(tmp284.shape, 0.0, tmp284.dtype)
tmp286 = tl.where(tmp13, tmp284, tmp285)
tmp288 = tl.where(tmp13, tmp286, tmp287)
tmp289 = tl.where(tmp13, tmp279, tmp288)
tmp290 = tl.where(tmp8, tmp247, tmp289)
tmp291 = tl.where(tmp2, tmp153, tmp290)
tl.store(out_ptr0 + (x2), tmp291, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/py/cpytp2auwhinkwdqpacccqnydcfnih56cbanca53bbl4nghkctqs.py
# Topologically Sorted Source Nodes: [iadd_7, iadd_8, iadd_9, iadd_10], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_10 => add_10
# iadd_7 => add_7
# iadd_8 => add_8
# iadd_9 => add_9
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_116, %select_15), kwargs = {})
# %slice_scatter_default_28 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_14, %add_7, 1, 12, 16), kwargs = {})
# %slice_scatter_default_29 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %slice_scatter_default_28, 0, 4, 8), kwargs = {})
# %slice_scatter_default_30 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_15, %slice_119, 1, 12, 16), kwargs = {})
# %slice_scatter_default_31 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %slice_scatter_default_30, 0, 4, 8), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_132, %select_17), kwargs = {})
# %slice_scatter_default_32 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_16, %add_8, 1, 0, 4), kwargs = {})
# %slice_scatter_default_33 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %slice_scatter_default_32, 0, 8, 12), kwargs = {})
# %slice_scatter_default_35 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %slice_scatter_default_34, 0, 8, 12), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_148, %select_19), kwargs = {})
# %slice_scatter_default_36 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_18, %add_9, 1, 4, 8), kwargs = {})
# %slice_scatter_default_37 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %slice_scatter_default_36, 0, 8, 12), kwargs = {})
# %slice_scatter_default_38 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_19, %slice_151, 1, 4, 8), kwargs = {})
# %slice_scatter_default_39 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %slice_scatter_default_38, 0, 8, 12), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_164, %select_21), kwargs = {})
# %slice_scatter_default_40 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_20, %add_10, 1, 8, 12), kwargs = {})
# %slice_scatter_default_41 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %slice_scatter_default_40, 0, 8, 12), kwargs = {})
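#
# Eager-mode sketch of the four fused in-place adds (illustrative shapes):
#
#     buf[4:8,  12:16] += a7           # add_7
#     buf[8:12,  0:4 ] += a8           # add_8
#     buf[8:12,  4:8 ] += a9           # add_9
#     buf[8:12,  8:12] += a10          # add_10
#
# each paired with the slice_scatter_default writes listed above.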
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 41, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp147 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
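    # tmp5 gates rows 8 <= x1 < 12; for those rows x2 - 128 falls in [0, 64),
    # indexing what appears to be the 64-element staging buffer (in_ptr0)
    # written by the previous kernel.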
tmp6 = tl.load(in_ptr0 + ((-128) + x2), tmp5 & xmask, other=0.0)
tmp7 = x0
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp9 & tmp5
tmp11 = tmp0 >= tmp8
tmp12 = tmp0 < tmp1
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp7 >= tmp3
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_out_ptr0 + (x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_out_ptr0 + (x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_out_ptr0 + (x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_out_ptr0 + (x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_out_ptr0 + (x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (96 + x0 + (4*x1)), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp5
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_out_ptr0 + (x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_out_ptr0 + (x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_out_ptr0 + (x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_out_ptr0 + (x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_out_ptr0 + (x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_out_ptr0 + (x2), tmp5 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp9, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp5, tmp105, tmp106)
tmp108 = tmp15 & tmp13
tmp109 = tmp13 & tmp108
tmp110 = tmp15 & tmp109
tmp111 = tl.load(in_out_ptr0 + (x2), tmp110 & xmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp110 & xmask, other=0.0)
tmp113 = tmp111 + tmp112
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.load(in_out_ptr0 + (x2), tmp109 & xmask, other=0.0)
tmp117 = tl.where(tmp15, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp109, tmp117, tmp118)
tmp120 = tl.load(in_out_ptr0 + (x2), tmp108 & xmask, other=0.0)
tmp121 = tl.where(tmp13, tmp119, tmp120)
tmp122 = tl.full(tmp121.shape, 0.0, tmp121.dtype)
tmp123 = tl.where(tmp108, tmp121, tmp122)
tmp124 = tmp13 & tmp13
tmp125 = tmp15 & tmp124
tmp126 = tl.load(in_out_ptr0 + (x2), tmp125 & xmask, other=0.0)
tmp127 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp125 & xmask, other=0.0)
tmp128 = tmp126 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp125, tmp128, tmp129)
tmp131 = tl.load(in_out_ptr0 + (x2), tmp124 & xmask, other=0.0)
tmp132 = tl.where(tmp15, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp124, tmp132, tmp133)
tmp135 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp136 = tl.where(tmp13, tmp134, tmp135)
tmp137 = tl.where(tmp15, tmp123, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp13, tmp137, tmp138)
tmp140 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp108 & xmask, other=0.0)
tmp141 = tmp120 + tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp108, tmp141, tmp142)
tmp144 = tl.where(tmp15, tmp143, tmp135)
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp13, tmp144, tmp145)
tmp148 = tl.where(tmp13, tmp146, tmp147)
tmp149 = tl.where(tmp13, tmp139, tmp148)
tmp150 = tl.where(tmp5, tmp107, tmp149)
tmp151 = tl.where(tmp5, tmp6, tmp150)
tmp152 = tmp7 >= tmp1
tmp153 = tmp7 < tmp3
tmp154 = tmp152 & tmp153
tmp155 = tmp154 & tmp5
tmp156 = tmp5 & tmp155
tmp157 = tmp7 >= tmp8
tmp158 = tmp7 < tmp1
tmp159 = tmp157 & tmp158
tmp160 = tmp159 & tmp156
tmp161 = tmp5 & tmp160
tmp162 = tmp159 & tmp161
tmp163 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp162 & xmask, other=0.0)
tmp164 = tmp151 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp162, tmp164, tmp165)
tmp167 = tl.where(tmp159, tmp166, tmp151)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp161, tmp167, tmp168)
tmp170 = tl.where(tmp5, tmp169, tmp151)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp160, tmp170, tmp171)
tmp173 = tmp5 & tmp156
tmp174 = tmp159 & tmp173
tmp175 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp174 & xmask, other=0.0)
tmp176 = tmp151 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp174, tmp176, tmp177)
tmp179 = tl.where(tmp159, tmp178, tmp151)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp173, tmp179, tmp180)
tmp182 = tl.where(tmp5, tmp181, tmp151)
tmp183 = tl.where(tmp159, tmp172, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp156, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp160 & xmask, other=0.0)
tmp187 = tmp151 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp160, tmp187, tmp188)
tmp190 = tl.where(tmp159, tmp189, tmp151)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp156, tmp190, tmp191)
tmp193 = tl.where(tmp5, tmp192, tmp151)
tmp194 = tl.where(tmp5, tmp185, tmp193)
tmp195 = tl.load(in_ptr1 + (120 + x0 + (4*x1)), tmp155 & xmask, other=0.0)
tmp196 = tmp194 + tmp195
tmp197 = tl.full(tmp196.shape, 0.0, tmp196.dtype)
tmp198 = tl.where(tmp155, tmp196, tmp197)
tmp199 = tmp5 & tmp5
tmp200 = tmp159 & tmp199
tmp201 = tmp5 & tmp200
tmp202 = tmp159 & tmp201
tmp203 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp202 & xmask, other=0.0)
tmp204 = tmp151 + tmp203
tmp205 = tl.full(tmp204.shape, 0.0, tmp204.dtype)
tmp206 = tl.where(tmp202, tmp204, tmp205)
tmp207 = tl.where(tmp159, tmp206, tmp151)
tmp208 = tl.full(tmp207.shape, 0.0, tmp207.dtype)
tmp209 = tl.where(tmp201, tmp207, tmp208)
tmp210 = tl.where(tmp5, tmp209, tmp151)
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp200, tmp210, tmp211)
tmp213 = tmp5 & tmp199
tmp214 = tmp159 & tmp213
tmp215 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp214 & xmask, other=0.0)
tmp216 = tmp151 + tmp215
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp214, tmp216, tmp217)
tmp219 = tl.where(tmp159, tmp218, tmp151)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp213, tmp219, tmp220)
tmp222 = tl.where(tmp5, tmp221, tmp151)
tmp223 = tl.where(tmp159, tmp212, tmp222)
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp199, tmp223, tmp224)
tmp226 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp200 & xmask, other=0.0)
tmp227 = tmp151 + tmp226
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp200, tmp227, tmp228)
tmp230 = tl.where(tmp159, tmp229, tmp151)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp199, tmp230, tmp231)
tmp233 = tl.where(tmp5, tmp232, tmp151)
tmp234 = tl.where(tmp5, tmp225, tmp233)
tmp235 = tl.where(tmp154, tmp198, tmp234)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp5, tmp235, tmp236)
tmp238 = tmp159 & tmp5
tmp239 = tmp5 & tmp238
tmp240 = tmp159 & tmp239
tmp241 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp240 & xmask, other=0.0)
tmp242 = tmp151 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp159, tmp244, tmp151)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp5, tmp247, tmp151)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp159, tmp250, tmp233)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp5, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp238 & xmask, other=0.0)
tmp255 = tmp151 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp159, tmp257, tmp151)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp5, tmp258, tmp259)
tmp261 = tl.where(tmp5, tmp260, tmp151)
tmp262 = tl.where(tmp5, tmp253, tmp261)
tmp263 = tl.where(tmp5, tmp237, tmp262)
tl.store(in_out_ptr0 + (x2), tmp263, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/ln/clnpe6as37knvaycmymqf2jkrvvvemfcqdlqzyl3bzkkywcxfyfp.py
# Topologically Sorted Source Nodes: [iadd_12], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_12 => add_12
# Graph fragment:
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_196, %select_25), kwargs = {})
# %slice_scatter_default_48 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_24, %add_12, 1, 0, 4), kwargs = {})
# %slice_scatter_default_50 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_25, %slice_199, 1, 0, 4), kwargs = {})
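#
# Eager-mode sketch (illustrative): the final row block receives an in-place
# add on its leading columns, roughly `buf[12:16, 0:4] += a12` (add_12),
# before the two slice_scatter_default ops copy the result back.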
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 43, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
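    # Unconditional preload of rows 12:16 of the accumulator (offset 192);
    # it serves as the final fallback value in the where-chains below.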
tmp198 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 12 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp0 >= tmp6
tmp11 = tmp10 & tmp9
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp0 >= tmp4
tmp16 = tmp0 < tmp6
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_ptr0 + (192 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (192 + x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (192 + x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (192 + x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_ptr0 + (192 + x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_ptr0 + (192 + x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp9
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_ptr0 + (192 + x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_ptr0 + (192 + x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_ptr0 + (192 + x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_ptr0 + (192 + x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp9, tmp87, tmp88)
tmp90 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp9, tmp94, tmp95)
tmp97 = tmp17 & tmp9
tmp98 = tl.load(in_ptr0 + (192 + x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp9, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (192 + x2), tmp2 & xmask, other=0.0)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.load(in_ptr1 + (192 + x0 + (4*x1)), tmp2 & xmask, other=0.0)
tmp107 = tmp105 + tmp106
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp2, tmp107, tmp108)
tmp110 = tmp10 & tmp8
tmp111 = tmp8 & tmp110
tmp112 = tmp10 & tmp111
tmp113 = tmp8 & tmp112
tmp114 = tmp17 & tmp113
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp114 & xmask, other=0.0)
tmp116 = tl.load(in_ptr0 + (192 + x2), tmp113 & xmask, other=0.0)
tmp117 = tl.where(tmp17, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp113, tmp117, tmp118)
tmp120 = tl.load(in_ptr0 + (192 + x2), tmp112 & xmask, other=0.0)
tmp121 = tl.where(tmp8, tmp119, tmp120)
tmp122 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp112 & xmask, other=0.0)
tmp123 = tmp121 + tmp122
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp112, tmp123, tmp124)
tmp126 = tmp8 & tmp111
tmp127 = tmp17 & tmp126
tmp128 = tl.load(in_ptr0 + (192 + x2), tmp127 & xmask, other=0.0)
tmp129 = tl.load(in_ptr0 + (192 + x2), tmp126 & xmask, other=0.0)
tmp130 = tl.where(tmp17, tmp128, tmp129)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp126, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp111 & xmask, other=0.0)
tmp134 = tl.where(tmp8, tmp132, tmp133)
tmp135 = tl.where(tmp10, tmp125, tmp134)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp111, tmp135, tmp136)
tmp138 = tmp17 & tmp111
tmp139 = tl.load(in_ptr0 + (192 + x2), tmp138 & xmask, other=0.0)
tmp140 = tl.where(tmp17, tmp139, tmp133)
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp111, tmp140, tmp141)
tmp143 = tl.load(in_ptr0 + (192 + x2), tmp110 & xmask, other=0.0)
tmp144 = tl.where(tmp8, tmp142, tmp143)
tmp145 = tl.where(tmp8, tmp137, tmp144)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp110, tmp145, tmp146)
tmp148 = tmp8 & tmp8
tmp149 = tmp10 & tmp148
tmp150 = tmp8 & tmp149
tmp151 = tmp17 & tmp150
tmp152 = tl.load(in_ptr0 + (192 + x2), tmp151 & xmask, other=0.0)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp150 & xmask, other=0.0)
tmp154 = tl.where(tmp17, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp150, tmp154, tmp155)
tmp157 = tl.load(in_ptr0 + (192 + x2), tmp149 & xmask, other=0.0)
tmp158 = tl.where(tmp8, tmp156, tmp157)
tmp159 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp149 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp149, tmp160, tmp161)
tmp163 = tmp8 & tmp148
tmp164 = tmp17 & tmp163
tmp165 = tl.load(in_ptr0 + (192 + x2), tmp164 & xmask, other=0.0)
tmp166 = tl.load(in_ptr0 + (192 + x2), tmp163 & xmask, other=0.0)
tmp167 = tl.where(tmp17, tmp165, tmp166)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp163, tmp167, tmp168)
tmp170 = tl.load(in_ptr0 + (192 + x2), tmp148 & xmask, other=0.0)
tmp171 = tl.where(tmp8, tmp169, tmp170)
tmp172 = tl.where(tmp10, tmp162, tmp171)
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp148, tmp172, tmp173)
tmp175 = tmp17 & tmp148
tmp176 = tl.load(in_ptr0 + (192 + x2), tmp175 & xmask, other=0.0)
tmp177 = tl.where(tmp17, tmp176, tmp170)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp148, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp181 = tl.where(tmp8, tmp179, tmp180)
tmp182 = tl.where(tmp8, tmp174, tmp181)
tmp183 = tl.where(tmp10, tmp147, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp8, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp110 & xmask, other=0.0)
tmp187 = tmp144 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp110, tmp187, tmp188)
tmp190 = tl.where(tmp10, tmp189, tmp181)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp8, tmp190, tmp191)
tmp193 = tmp17 & tmp8
tmp194 = tl.load(in_ptr0 + (192 + x2), tmp193 & xmask, other=0.0)
tmp195 = tl.where(tmp17, tmp194, tmp180)
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp8, tmp195, tmp196)
tmp199 = tl.where(tmp8, tmp197, tmp198)
tmp200 = tl.where(tmp8, tmp192, tmp199)
tmp201 = tl.where(tmp8, tmp185, tmp200)
tmp202 = tl.where(tmp2, tmp109, tmp201)
tmp203 = tmp3 >= tmp6
tmp204 = tmp203 & tmp2
tmp205 = tl.where(tmp203, tmp202, tmp105)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp2, tmp205, tmp206)
tmp208 = tl.where(tmp203, tmp202, tmp201)
tmp209 = tl.where(tmp2, tmp207, tmp208)
tl.store(out_ptr0 + (x2), tmp202, xmask)
tl.store(out_ptr1 + (x2), tmp209, xmask)
''', device_str='cuda')
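# Note: the deeply nested tmp* chains in these fused kernels are Inductor's
# lowering of the Python double loop in Truncation2D.forward below. Each
# in-place `output[outslice] += input[inslice]` appears in the graph as an
# aten.add followed by slice_scatter ops (see the graph fragments in the
# kernel headers), and every slice_scatter is emitted as a predicated select,
# `tl.where(block_mask, updated_value, previous_value)`, where the boolean
# tmp* terms test which 4x4 block of the 16x16 output an element falls in.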
# kernel path: runs/run_shard_0/inductor_cache/ig/ciglefpf6tbo4ytp64nt6lhcnsuascdfr6hplbutd6npiqpgt2mh.py
# Topologically Sorted Source Nodes: [iadd_11], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_11 => add_11
# Graph fragment:
# %slice_scatter_default_42 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_21, %slice_167, 1, 8, 12), kwargs = {})
# %slice_scatter_default_43 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %slice_scatter_default_42, 0, 8, 12), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_180, %select_23), kwargs = {})
# %slice_scatter_default_44 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_22, %add_11, 1, 12, 16), kwargs = {})
# %slice_scatter_default_45 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %slice_scatter_default_44, 0, 8, 12), kwargs = {})
# %slice_scatter_default_46 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_23, %slice_183, 1, 12, 16), kwargs = {})
# %slice_scatter_default_47 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %slice_scatter_default_46, 0, 8, 12), kwargs = {})
# %slice_scatter_default_49 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %slice_scatter_default_48, 0, 12, 16), kwargs = {})
# %slice_scatter_default_51 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %slice_scatter_default_50, 0, 12, 16), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 23, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp102 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp0 >= tmp5
tmp7 = tmp0 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp10 & tmp8
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp9 >= tmp5
tmp16 = tmp9 < tmp1
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_out_ptr0 + (x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_out_ptr0 + (x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_out_ptr0 + (x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_out_ptr0 + (x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_out_ptr0 + (x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_out_ptr0 + (x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp8
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_out_ptr0 + (x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_out_ptr0 + (x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_out_ptr0 + (x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_out_ptr0 + (x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_out_ptr0 + (x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + (x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_out_ptr0 + (x2), tmp8 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp8, tmp87, tmp88)
tmp90 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp8, tmp94, tmp95)
tmp97 = tmp17 & tmp8
tmp98 = tl.load(in_out_ptr0 + (x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp8, tmp99, tmp100)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.where(tmp2, tmp4, tmp105)
tmp107 = tl.where(tmp2, tmp3, tmp106)
tl.store(in_out_ptr0 + (x2), tmp107, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vr/cvregea6kqmjw7j5p7y5ofutcpo5akj3f4aziqv5ffirai74wdrq.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_58 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_29, %slice_231, 1, 8, 12), kwargs = {})
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 54, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp259 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 12 + x1
tmp7 = tmp6 >= tmp3
tmp8 = tmp7 & tmp5
tmp9 = tmp5 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 >= tmp11
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp7 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (192 + x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_ptr0 + (192 + x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (192 + x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp7 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_ptr0 + (192 + x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp7, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp7, tmp53, tmp54)
tmp56 = tl.where(tmp7, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (216 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp7 & tmp8
tmp62 = tmp14 & tmp61
tmp63 = tmp7 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_ptr0 + (192 + x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp7, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp7 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_ptr0 + (192 + x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_ptr0 + (192 + x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp7, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp102 = tl.where(tmp7, tmp100, tmp101)
tmp103 = tl.where(tmp7, tmp93, tmp102)
tmp104 = tl.where(tmp5, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp8, tmp104, tmp105)
tmp107 = tmp14 & tmp8
tmp108 = tmp7 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_ptr0 + (192 + x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_ptr0 + (192 + x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp7, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp8, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp8, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp5 & xmask, other=0.0)
tmp134 = tl.where(tmp7, tmp132, tmp133)
tmp135 = tl.where(tmp7, tmp125, tmp134)
tmp136 = tl.where(tmp7, tmp106, tmp135)
tmp137 = tl.full(tmp136.shape, 0.0, tmp136.dtype)
tmp138 = tl.where(tmp5, tmp136, tmp137)
tmp139 = tmp5 & tmp7
tmp140 = tmp7 & tmp139
tmp141 = tmp14 & tmp140
tmp142 = tmp7 & tmp141
tmp143 = tmp14 & tmp142
tmp144 = tl.load(in_ptr0 + (192 + x2), tmp143 & xmask, other=0.0)
tmp145 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp143 & xmask, other=0.0)
tmp146 = tmp144 + tmp145
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp143, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (192 + x2), tmp142 & xmask, other=0.0)
tmp150 = tl.where(tmp14, tmp148, tmp149)
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp142, tmp150, tmp151)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp141 & xmask, other=0.0)
tmp154 = tl.where(tmp7, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp141, tmp154, tmp155)
tmp157 = tmp7 & tmp140
tmp158 = tmp14 & tmp157
tmp159 = tl.load(in_ptr0 + (192 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (192 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp14, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (192 + x2), tmp140 & xmask, other=0.0)
tmp169 = tl.where(tmp7, tmp167, tmp168)
tmp170 = tl.where(tmp14, tmp156, tmp169)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp140, tmp170, tmp171)
tmp173 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp141 & xmask, other=0.0)
tmp174 = tmp153 + tmp173
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp141, tmp174, tmp175)
tmp177 = tl.where(tmp14, tmp176, tmp168)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp140, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp139 & xmask, other=0.0)
tmp181 = tl.where(tmp7, tmp179, tmp180)
tmp182 = tl.where(tmp7, tmp172, tmp181)
tmp183 = tl.load(in_ptr1 + (216 + x0 + (4*x1)), tmp139 & xmask, other=0.0)
tmp184 = tmp182 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp139, tmp184, tmp185)
tmp187 = tmp7 & tmp7
tmp188 = tmp14 & tmp187
tmp189 = tmp7 & tmp188
tmp190 = tmp14 & tmp189
tmp191 = tl.load(in_ptr0 + (192 + x2), tmp190 & xmask, other=0.0)
tmp192 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp190 & xmask, other=0.0)
tmp193 = tmp191 + tmp192
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp190, tmp193, tmp194)
tmp196 = tl.load(in_ptr0 + (192 + x2), tmp189 & xmask, other=0.0)
tmp197 = tl.where(tmp14, tmp195, tmp196)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp189, tmp197, tmp198)
tmp200 = tl.load(in_ptr0 + (192 + x2), tmp188 & xmask, other=0.0)
tmp201 = tl.where(tmp7, tmp199, tmp200)
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp188, tmp201, tmp202)
tmp204 = tmp7 & tmp187
tmp205 = tmp14 & tmp204
tmp206 = tl.load(in_ptr0 + (192 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (192 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp14, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (192 + x2), tmp187 & xmask, other=0.0)
tmp216 = tl.where(tmp7, tmp214, tmp215)
tmp217 = tl.where(tmp14, tmp203, tmp216)
tmp218 = tl.full(tmp217.shape, 0.0, tmp217.dtype)
tmp219 = tl.where(tmp187, tmp217, tmp218)
tmp220 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp188 & xmask, other=0.0)
tmp221 = tmp200 + tmp220
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp188, tmp221, tmp222)
tmp224 = tl.where(tmp14, tmp223, tmp215)
tmp225 = tl.full(tmp224.shape, 0.0, tmp224.dtype)
tmp226 = tl.where(tmp187, tmp224, tmp225)
tmp227 = tl.load(in_ptr0 + (192 + x2), tmp7 & xmask, other=0.0)
tmp228 = tl.where(tmp7, tmp226, tmp227)
tmp229 = tl.where(tmp7, tmp219, tmp228)
tmp230 = tl.where(tmp5, tmp186, tmp229)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp7, tmp230, tmp231)
tmp233 = tmp14 & tmp7
tmp234 = tmp7 & tmp233
tmp235 = tmp14 & tmp234
tmp236 = tl.load(in_ptr0 + (192 + x2), tmp235 & xmask, other=0.0)
tmp237 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp235 & xmask, other=0.0)
tmp238 = tmp236 + tmp237
tmp239 = tl.full(tmp238.shape, 0.0, tmp238.dtype)
tmp240 = tl.where(tmp235, tmp238, tmp239)
tmp241 = tl.load(in_ptr0 + (192 + x2), tmp234 & xmask, other=0.0)
tmp242 = tl.where(tmp14, tmp240, tmp241)
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp234, tmp242, tmp243)
tmp245 = tl.load(in_ptr0 + (192 + x2), tmp233 & xmask, other=0.0)
tmp246 = tl.where(tmp7, tmp244, tmp245)
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp233, tmp246, tmp247)
tmp249 = tl.where(tmp14, tmp248, tmp228)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp7, tmp249, tmp250)
tmp252 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp233 & xmask, other=0.0)
tmp253 = tmp245 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp233, tmp253, tmp254)
tmp256 = tl.where(tmp14, tmp255, tmp227)
tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype)
tmp258 = tl.where(tmp7, tmp256, tmp257)
tmp260 = tl.where(tmp7, tmp258, tmp259)
tmp261 = tl.where(tmp7, tmp251, tmp260)
tmp262 = tl.where(tmp7, tmp232, tmp261)
tmp263 = tl.where(tmp5, tmp138, tmp262)
tl.store(out_ptr0 + (x2), tmp263, xmask)
''', device_str='cuda')
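# Note: this kernel has no source nodes of its own (Original ATen: []); it
# folds the pending slice updates for rows 12..15 of the accumulator (hence
# the `192 + x2` offsets, 12 * 16 = 192) into a 4x16 staging buffer that
# triton_poi_fused_add_10 below reads back, breaking the aliasing chain
# between consecutive slice_scatter updates.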
# kernel path: runs/run_shard_0/inductor_cache/xs/cxskzsbd5xwrh5ri3x5evkuvt5qxg5vxx6dn7dcjz2nswnkrtk3e.py
# Topologically Sorted Source Nodes: [iadd_13, iadd_14, iadd_15], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_13 => add_13
# iadd_14 => add_14
# iadd_15 => add_15
# Graph fragment:
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_212, %select_27), kwargs = {})
# %slice_scatter_default_52 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_26, %add_13, 1, 4, 8), kwargs = {})
# %slice_scatter_default_53 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %slice_scatter_default_52, 0, 12, 16), kwargs = {})
# %slice_scatter_default_54 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_27, %slice_215, 1, 4, 8), kwargs = {})
# %slice_scatter_default_55 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %slice_scatter_default_54, 0, 12, 16), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_228, %select_29), kwargs = {})
# %slice_scatter_default_56 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_28, %add_14, 1, 8, 12), kwargs = {})
# %slice_scatter_default_57 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %slice_scatter_default_56, 0, 12, 16), kwargs = {})
# %slice_scatter_default_59 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %slice_scatter_default_58, 0, 12, 16), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_244, %select_31), kwargs = {})
# %slice_scatter_default_60 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_30, %add_15, 1, 12, 16), kwargs = {})
# %slice_scatter_default_61 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %slice_scatter_default_60, 0, 12, 16), kwargs = {})
# %slice_scatter_default_62 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_31, %slice_247, 1, 12, 16), kwargs = {})
# %slice_scatter_default_63 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %slice_scatter_default_62, 0, 12, 16), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 31, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp133 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp4 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp4 >= tmp11
tmp13 = tmp4 < tmp5
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp2 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + (x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_out_ptr0 + (x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp2, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp2 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_out_ptr0 + (x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_out_ptr0 + (x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_out_ptr0 + (x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp2, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_out_ptr0 + (x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp2, tmp53, tmp54)
tmp56 = tl.where(tmp2, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (168 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp2 & tmp2
tmp62 = tmp14 & tmp61
tmp63 = tmp2 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_out_ptr0 + (x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_out_ptr0 + (x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + (x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp2, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp2 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_out_ptr0 + (x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_out_ptr0 + (x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp2, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_out_ptr0 + (x2), tmp2 & xmask, other=0.0)
tmp102 = tl.where(tmp2, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp93, tmp102)
tmp104 = tl.where(tmp8, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp2, tmp104, tmp105)
tmp107 = tmp14 & tmp2
tmp108 = tmp2 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_out_ptr0 + (x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_out_ptr0 + (x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_out_ptr0 + (x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp2, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp2, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp2, tmp130, tmp131)
tmp134 = tl.where(tmp2, tmp132, tmp133)
tmp135 = tl.where(tmp2, tmp125, tmp134)
tmp136 = tl.where(tmp2, tmp106, tmp135)
tmp137 = tl.where(tmp2, tmp3, tmp136)
tmp138 = tmp4 >= tmp1
tmp139 = tmp138 & tmp2
tmp140 = tmp2 & tmp139
tmp141 = tmp138 & tmp140
tmp142 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp141 & xmask, other=0.0)
tmp143 = tmp137 + tmp142
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp141, tmp143, tmp144)
tmp146 = tl.where(tmp138, tmp145, tmp137)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp140, tmp146, tmp147)
tmp149 = tl.where(tmp2, tmp148, tmp137)
tmp150 = tl.full(tmp149.shape, 0.0, tmp149.dtype)
tmp151 = tl.where(tmp139, tmp149, tmp150)
tmp152 = tmp138 & tmp61
tmp153 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp152 & xmask, other=0.0)
tmp154 = tmp137 + tmp153
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp152, tmp154, tmp155)
tmp157 = tl.where(tmp138, tmp156, tmp137)
tmp158 = tl.full(tmp157.shape, 0.0, tmp157.dtype)
tmp159 = tl.where(tmp61, tmp157, tmp158)
tmp160 = tl.where(tmp2, tmp159, tmp137)
tmp161 = tl.where(tmp138, tmp151, tmp160)
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp2, tmp161, tmp162)
tmp164 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp139 & xmask, other=0.0)
tmp165 = tmp137 + tmp164
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp139, tmp165, tmp166)
tmp168 = tl.where(tmp138, tmp167, tmp137)
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp2, tmp168, tmp169)
tmp171 = tl.where(tmp2, tmp170, tmp137)
tmp172 = tl.where(tmp2, tmp163, tmp171)
tl.store(in_out_ptr0 + (x2), tmp172, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
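# Note: `call` replays the unrolled i/j loop of Truncation2D.forward as a
# fixed pipeline of eleven fused kernels. The 16x16 float32 accumulator is
# carried in-place through the 256-element kernels (buf3 -> buf6 -> buf9 ->
# buf12 -> buf15, each `# reuse` step recycling the previous allocation),
# while the 16- and 64-element kernels stage intermediate row slabs (buf0,
# buf1, buf4/buf5, buf7, buf10/buf11, buf13) for the next accumulation step.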
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [iadd_2], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf0, arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del buf0
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [output, iadd, iadd_1, iadd_3, iadd_4], Original ATen: [aten.zeros, aten.add]
triton_poi_fused_add_zeros_2.run(buf3, buf1, arg0_1, 256, grid=grid(256), stream=stream0)
buf4 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [iadd_6], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf3, arg0_1, buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [iadd_5], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf6, buf5, buf4, arg0_1, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(buf6, arg0_1, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [iadd_7, iadd_8, iadd_9, iadd_10], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf9, buf7, arg0_1, 256, grid=grid(256), stream=stream0)
buf10 = buf7; del buf7 # reuse
buf11 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [iadd_12], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf9, arg0_1, buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [iadd_11], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf12, buf11, buf10, arg0_1, 256, grid=grid(256), stream=stream0)
del buf10
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf12, arg0_1, buf13, 64, grid=grid(64), stream=stream0)
buf14 = buf12; del buf12 # reuse
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [iadd_13, iadd_14, iadd_15], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf15, buf13, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf13
return (buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Truncation2D(torch.nn.Module):
"""
A module merging the last two dimensions, merging coarse scale in grid
of dimensions -4, -3 and finer resolution in dimensions -2, -1 to
one fine grained grid with two dimensions less.
"""
def __init__(self):
super().__init__()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """
        :param input: tensor of shape (..., I, J, P, Q)
        :returns: tensor of shape (..., I * P, J * Q) in which block
            (i, j) holds input[..., i, j, :, :]
        """
        shape = input.shape
        # Drop the last two dims, then scale the remaining trailing pair:
        # (..., I, J, P, Q) -> (..., I * P, J * Q).
        outputshape = list(input.shape[:-2])
        expsize1 = input.shape[-2]
        expsize2 = input.shape[-1]
        outputshape[-2] *= expsize1
        outputshape[-1] *= expsize2
        # Full slices for every leading (batch) dimension.
        baseslice = [slice(None, None, 1) for _ in range(len(outputshape) - 2)]
        # Match the input dtype: torch.zeros defaults to float32, and the
        # in-place += below would fail for, e.g., float64 inputs otherwise.
        output = torch.zeros(outputshape, dtype=input.dtype,
            device=input.device, requires_grad=False)
        # Scatter each (P, Q) tile of the coarse (I, J) grid into its block
        # of the fine-grained output.
        for i in range(shape[-4]):
            for j in range(shape[-3]):
                outslice = tuple(baseslice + [
                    slice(expsize1 * i, expsize1 * (i + 1)),
                    slice(expsize2 * j, expsize2 * (j + 1))])
                inslice = tuple(baseslice + [i, j, slice(None, None, 1),
                    slice(None, None, 1)])
                output[outslice] += input[inslice]
        return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
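# A minimal sanity check (a sketch, not part of the compiled module): for a
# plain 4-D input of shape (I, J, P, Q), Truncation2D is equivalent to a
# permute + reshape, because each (i, j) tile lands in exactly one output
# block. The helper name `_truncation2d_reference` is introduced here for
# illustration only.
def _truncation2d_reference(x: torch.Tensor) -> torch.Tensor:
    I, J, P, Q = x.shape
    # (I, J, P, Q) -> (I, P, J, Q) -> (I * P, J * Q)
    return x.permute(0, 2, 1, 3).reshape(I * P, J * Q)

if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(Truncation2D()(x), _truncation2d_reference(x))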
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp264 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp4 & tmp6
tmp8 = tmp7 & tmp2
tmp9 = tmp2 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp2 & tmp10
tmp12 = tmp3 < tmp1
tmp13 = tmp12 & tmp11
tmp14 = tmp2 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (8 + x2), tmp15 & xmask, other=0.0)
tmp17 = 0.0
tmp18 = tmp17 + tmp16
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tl.where(tmp12, tmp20, tmp17)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.where(tmp2, tmp23, tmp17)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp2 & tmp11
tmp28 = tmp12 & tmp27
tmp29 = tl.load(in_ptr0 + (8 + x2), tmp28 & xmask, other=0.0)
tmp30 = tmp17 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tl.where(tmp12, tmp32, tmp17)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp2, tmp35, tmp17)
tmp37 = tl.where(tmp12, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp11, tmp37, tmp38)
tmp40 = tl.load(in_ptr0 + (8 + x2), tmp13 & xmask, other=0.0)
tmp41 = tmp17 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp13, tmp41, tmp42)
tmp44 = tl.where(tmp12, tmp43, tmp17)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp11, tmp44, tmp45)
tmp47 = tl.where(tmp2, tmp46, tmp17)
tmp48 = tl.where(tmp2, tmp39, tmp47)
tmp49 = tl.load(in_ptr0 + (20 + x2), tmp10 & xmask, other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp10, tmp50, tmp51)
tmp53 = tmp2 & tmp9
tmp54 = tmp12 & tmp53
tmp55 = tmp2 & tmp54
tmp56 = tmp12 & tmp55
tmp57 = tl.load(in_ptr0 + (8 + x2), tmp56 & xmask, other=0.0)
tmp58 = tmp17 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp56, tmp58, tmp59)
tmp61 = tl.where(tmp12, tmp60, tmp17)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp55, tmp61, tmp62)
tmp64 = tl.where(tmp2, tmp63, tmp17)
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp54, tmp64, tmp65)
tmp67 = tmp2 & tmp53
tmp68 = tmp12 & tmp67
tmp69 = tl.load(in_ptr0 + (8 + x2), tmp68 & xmask, other=0.0)
tmp70 = tmp17 + tmp69
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp68, tmp70, tmp71)
tmp73 = tl.where(tmp12, tmp72, tmp17)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp67, tmp73, tmp74)
tmp76 = tl.where(tmp2, tmp75, tmp17)
tmp77 = tl.where(tmp12, tmp66, tmp76)
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp53, tmp77, tmp78)
tmp80 = tl.load(in_ptr0 + (8 + x2), tmp54 & xmask, other=0.0)
tmp81 = tmp17 + tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp54, tmp81, tmp82)
tmp84 = tl.where(tmp12, tmp83, tmp17)
tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype)
tmp86 = tl.where(tmp53, tmp84, tmp85)
tmp87 = tl.where(tmp2, tmp86, tmp17)
tmp88 = tl.where(tmp2, tmp79, tmp87)
tmp89 = tl.where(tmp7, tmp52, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp9, tmp89, tmp90)
tmp92 = tmp12 & tmp9
tmp93 = tmp2 & tmp92
tmp94 = tmp12 & tmp93
tmp95 = tl.load(in_ptr0 + (8 + x2), tmp94 & xmask, other=0.0)
tmp96 = tmp17 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp94, tmp96, tmp97)
tmp99 = tl.where(tmp12, tmp98, tmp17)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp93, tmp99, tmp100)
tmp102 = tl.where(tmp2, tmp101, tmp17)
tmp103 = tl.full(tmp102.shape, 0.0, tmp102.dtype)
tmp104 = tl.where(tmp92, tmp102, tmp103)
tmp105 = tl.where(tmp12, tmp104, tmp87)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tl.load(in_ptr0 + (8 + x2), tmp92 & xmask, other=0.0)
tmp109 = tmp17 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp92, tmp109, tmp110)
tmp112 = tl.where(tmp12, tmp111, tmp17)
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp9, tmp112, tmp113)
tmp115 = tl.where(tmp2, tmp114, tmp17)
tmp116 = tl.where(tmp2, tmp107, tmp115)
tmp117 = tl.where(tmp2, tmp91, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp8, tmp117, tmp118)
tmp120 = tmp2 & tmp2
tmp121 = tmp7 & tmp120
tmp122 = tmp2 & tmp121
tmp123 = tmp12 & tmp122
tmp124 = tmp2 & tmp123
tmp125 = tmp12 & tmp124
tmp126 = tl.load(in_ptr0 + (8 + x2), tmp125 & xmask, other=0.0)
tmp127 = tmp17 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp125, tmp127, tmp128)
tmp130 = tl.where(tmp12, tmp129, tmp17)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp124, tmp130, tmp131)
tmp133 = tl.where(tmp2, tmp132, tmp17)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp123, tmp133, tmp134)
tmp136 = tmp2 & tmp122
tmp137 = tmp12 & tmp136
tmp138 = tl.load(in_ptr0 + (8 + x2), tmp137 & xmask, other=0.0)
tmp139 = tmp17 + tmp138
tmp140 = tl.full(tmp139.shape, 0.0, tmp139.dtype)
tmp141 = tl.where(tmp137, tmp139, tmp140)
tmp142 = tl.where(tmp12, tmp141, tmp17)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp136, tmp142, tmp143)
tmp145 = tl.where(tmp2, tmp144, tmp17)
tmp146 = tl.where(tmp12, tmp135, tmp145)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp122, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (8 + x2), tmp123 & xmask, other=0.0)
tmp150 = tmp17 + tmp149
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp123, tmp150, tmp151)
tmp153 = tl.where(tmp12, tmp152, tmp17)
tmp154 = tl.full(tmp153.shape, 0.0, tmp153.dtype)
tmp155 = tl.where(tmp122, tmp153, tmp154)
tmp156 = tl.where(tmp2, tmp155, tmp17)
tmp157 = tl.where(tmp2, tmp148, tmp156)
tmp158 = tl.load(in_ptr0 + (20 + x2), tmp121 & xmask, other=0.0)
tmp159 = tmp157 + tmp158
tmp160 = tl.full(tmp159.shape, 0.0, tmp159.dtype)
tmp161 = tl.where(tmp121, tmp159, tmp160)
tmp162 = tmp2 & tmp120
tmp163 = tmp12 & tmp162
tmp164 = tmp2 & tmp163
tmp165 = tmp12 & tmp164
tmp166 = tl.load(in_ptr0 + (8 + x2), tmp165 & xmask, other=0.0)
tmp167 = tmp17 + tmp166
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp165, tmp167, tmp168)
tmp170 = tl.where(tmp12, tmp169, tmp17)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp164, tmp170, tmp171)
tmp173 = tl.where(tmp2, tmp172, tmp17)
tmp174 = tl.full(tmp173.shape, 0.0, tmp173.dtype)
tmp175 = tl.where(tmp163, tmp173, tmp174)
tmp176 = tmp2 & tmp162
tmp177 = tmp12 & tmp176
tmp178 = tl.load(in_ptr0 + (8 + x2), tmp177 & xmask, other=0.0)
tmp179 = tmp17 + tmp178
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp177, tmp179, tmp180)
tmp182 = tl.where(tmp12, tmp181, tmp17)
tmp183 = tl.full(tmp182.shape, 0.0, tmp182.dtype)
tmp184 = tl.where(tmp176, tmp182, tmp183)
tmp185 = tl.where(tmp2, tmp184, tmp17)
tmp186 = tl.where(tmp12, tmp175, tmp185)
tmp187 = tl.full(tmp186.shape, 0.0, tmp186.dtype)
tmp188 = tl.where(tmp162, tmp186, tmp187)
tmp189 = tl.load(in_ptr0 + (8 + x2), tmp163 & xmask, other=0.0)
tmp190 = tmp17 + tmp189
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp163, tmp190, tmp191)
tmp193 = tl.where(tmp12, tmp192, tmp17)
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp162, tmp193, tmp194)
tmp196 = tl.where(tmp2, tmp195, tmp17)
tmp197 = tl.where(tmp2, tmp188, tmp196)
tmp198 = tl.where(tmp7, tmp161, tmp197)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp120, tmp198, tmp199)
tmp201 = tmp12 & tmp120
tmp202 = tmp2 & tmp201
tmp203 = tmp12 & tmp202
tmp204 = tl.load(in_ptr0 + (8 + x2), tmp203 & xmask, other=0.0)
tmp205 = tmp17 + tmp204
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp203, tmp205, tmp206)
tmp208 = tl.where(tmp12, tmp207, tmp17)
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp202, tmp208, tmp209)
tmp211 = tl.where(tmp2, tmp210, tmp17)
tmp212 = tl.full(tmp211.shape, 0.0, tmp211.dtype)
tmp213 = tl.where(tmp201, tmp211, tmp212)
tmp214 = tl.where(tmp12, tmp213, tmp196)
tmp215 = tl.full(tmp214.shape, 0.0, tmp214.dtype)
tmp216 = tl.where(tmp120, tmp214, tmp215)
tmp217 = tl.load(in_ptr0 + (8 + x2), tmp201 & xmask, other=0.0)
tmp218 = tmp17 + tmp217
tmp219 = tl.full(tmp218.shape, 0.0, tmp218.dtype)
tmp220 = tl.where(tmp201, tmp218, tmp219)
tmp221 = tl.where(tmp12, tmp220, tmp17)
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp120, tmp221, tmp222)
tmp224 = tl.where(tmp2, tmp223, tmp17)
tmp225 = tl.where(tmp2, tmp216, tmp224)
tmp226 = tl.where(tmp2, tmp200, tmp225)
tmp227 = tl.where(tmp7, tmp119, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp2, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (20 + x2), tmp8 & xmask, other=0.0)
tmp231 = tmp116 + tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp8, tmp231, tmp232)
tmp234 = tl.where(tmp7, tmp233, tmp225)
tmp235 = tl.full(tmp234.shape, 0.0, tmp234.dtype)
tmp236 = tl.where(tmp2, tmp234, tmp235)
tmp237 = tmp12 & tmp2
tmp238 = tmp2 & tmp237
tmp239 = tmp12 & tmp238
tmp240 = tl.load(in_ptr0 + (8 + x2), tmp239 & xmask, other=0.0)
tmp241 = tmp17 + tmp240
tmp242 = tl.full(tmp241.shape, 0.0, tmp241.dtype)
tmp243 = tl.where(tmp239, tmp241, tmp242)
tmp244 = tl.where(tmp12, tmp243, tmp17)
tmp245 = tl.full(tmp244.shape, 0.0, tmp244.dtype)
tmp246 = tl.where(tmp238, tmp244, tmp245)
tmp247 = tl.where(tmp2, tmp246, tmp17)
tmp248 = tl.full(tmp247.shape, 0.0, tmp247.dtype)
tmp249 = tl.where(tmp237, tmp247, tmp248)
tmp250 = tl.where(tmp12, tmp249, tmp224)
tmp251 = tl.full(tmp250.shape, 0.0, tmp250.dtype)
tmp252 = tl.where(tmp2, tmp250, tmp251)
tmp253 = tl.load(in_ptr0 + (8 + x2), tmp237 & xmask, other=0.0)
tmp254 = tmp17 + tmp253
tmp255 = tl.full(tmp254.shape, 0.0, tmp254.dtype)
tmp256 = tl.where(tmp237, tmp254, tmp255)
tmp257 = tl.where(tmp12, tmp256, tmp17)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp2, tmp257, tmp258)
tmp260 = tl.where(tmp2, tmp259, tmp17)
tmp261 = tl.where(tmp2, tmp252, tmp260)
tmp262 = tl.where(tmp2, tmp236, tmp261)
tmp263 = tl.where(tmp2, tmp229, tmp262)
tmp265 = tmp263 + tmp264
tl.store(out_ptr0 + x2, tmp265, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-8 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tmp7 = x1
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp0 >= tmp8
tmp11 = tmp0 < tmp1
tmp12 = tmp10 & tmp11
tmp13 = tmp12 & tmp9
tmp14 = tmp9 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tmp9 & tmp15
tmp17 = tmp0 < tmp8
tmp18 = tmp17 & tmp16
tmp19 = tmp9 & tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp20 & xmask, other=0.0)
tmp22 = 0.0
tmp23 = tmp22 + tmp21
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp17, tmp25, tmp22)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp19, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp28, tmp22)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp18, tmp29, tmp30)
tmp32 = tmp9 & tmp16
tmp33 = tmp17 & tmp32
tmp34 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp35 = tmp22 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp33, tmp35, tmp36)
tmp38 = tl.where(tmp17, tmp37, tmp22)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp9, tmp40, tmp22)
tmp42 = tl.where(tmp17, tmp31, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp16, tmp42, tmp43)
tmp45 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp46 = tmp22 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp18, tmp46, tmp47)
tmp49 = tl.where(tmp17, tmp48, tmp22)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp9, tmp51, tmp22)
tmp53 = tl.where(tmp9, tmp44, tmp52)
tmp54 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp55 = tmp53 + tmp54
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp15, tmp55, tmp56)
tmp58 = tmp9 & tmp14
tmp59 = tmp17 & tmp58
tmp60 = tmp9 & tmp59
tmp61 = tmp17 & tmp60
tmp62 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp61 & xmask, other=0.0)
tmp63 = tmp22 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp61, tmp63, tmp64)
tmp66 = tl.where(tmp17, tmp65, tmp22)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp60, tmp66, tmp67)
tmp69 = tl.where(tmp9, tmp68, tmp22)
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp59, tmp69, tmp70)
tmp72 = tmp9 & tmp58
tmp73 = tmp17 & tmp72
tmp74 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp73 & xmask, other=0.0)
tmp75 = tmp22 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp73, tmp75, tmp76)
tmp78 = tl.where(tmp17, tmp77, tmp22)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp72, tmp78, tmp79)
tmp81 = tl.where(tmp9, tmp80, tmp22)
tmp82 = tl.where(tmp17, tmp71, tmp81)
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp58, tmp82, tmp83)
tmp85 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp59 & xmask, other=0.0)
tmp86 = tmp22 + tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp59, tmp86, tmp87)
tmp89 = tl.where(tmp17, tmp88, tmp22)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp58, tmp89, tmp90)
tmp92 = tl.where(tmp9, tmp91, tmp22)
tmp93 = tl.where(tmp9, tmp84, tmp92)
tmp94 = tl.where(tmp12, tmp57, tmp93)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp14, tmp94, tmp95)
tmp97 = tmp17 & tmp14
tmp98 = tmp9 & tmp97
tmp99 = tmp17 & tmp98
tmp100 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp99 & xmask, other=0.0)
tmp101 = tmp22 + tmp100
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp99, tmp101, tmp102)
tmp104 = tl.where(tmp17, tmp103, tmp22)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp98, tmp104, tmp105)
tmp107 = tl.where(tmp9, tmp106, tmp22)
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp97, tmp107, tmp108)
tmp110 = tl.where(tmp17, tmp109, tmp92)
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp14, tmp110, tmp111)
tmp113 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp97 & xmask, other=0.0)
tmp114 = tmp22 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp97, tmp114, tmp115)
tmp117 = tl.where(tmp17, tmp116, tmp22)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp14, tmp117, tmp118)
tmp120 = tl.where(tmp9, tmp119, tmp22)
tmp121 = tl.where(tmp9, tmp112, tmp120)
tmp122 = tl.where(tmp9, tmp96, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp13, tmp122, tmp123)
tmp125 = tmp9 & tmp9
tmp126 = tmp12 & tmp125
tmp127 = tmp9 & tmp126
tmp128 = tmp17 & tmp127
tmp129 = tmp9 & tmp128
tmp130 = tmp17 & tmp129
tmp131 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp130 & xmask, other=0.0)
tmp132 = tmp22 + tmp131
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp130, tmp132, tmp133)
tmp135 = tl.where(tmp17, tmp134, tmp22)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp129, tmp135, tmp136)
tmp138 = tl.where(tmp9, tmp137, tmp22)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp128, tmp138, tmp139)
tmp141 = tmp9 & tmp127
tmp142 = tmp17 & tmp141
tmp143 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp142 & xmask, other=0.0)
tmp144 = tmp22 + tmp143
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp142, tmp144, tmp145)
tmp147 = tl.where(tmp17, tmp146, tmp22)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp141, tmp147, tmp148)
tmp150 = tl.where(tmp9, tmp149, tmp22)
tmp151 = tl.where(tmp17, tmp140, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp127, tmp151, tmp152)
tmp154 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp128 & xmask, other=0.0)
tmp155 = tmp22 + tmp154
tmp156 = tl.full(tmp155.shape, 0.0, tmp155.dtype)
tmp157 = tl.where(tmp128, tmp155, tmp156)
tmp158 = tl.where(tmp17, tmp157, tmp22)
tmp159 = tl.full(tmp158.shape, 0.0, tmp158.dtype)
tmp160 = tl.where(tmp127, tmp158, tmp159)
tmp161 = tl.where(tmp9, tmp160, tmp22)
tmp162 = tl.where(tmp9, tmp153, tmp161)
tmp163 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp164 = tmp162 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp126, tmp164, tmp165)
tmp167 = tmp9 & tmp125
tmp168 = tmp17 & tmp167
tmp169 = tmp9 & tmp168
tmp170 = tmp17 & tmp169
tmp171 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp170 & xmask, other=0.0)
tmp172 = tmp22 + tmp171
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp170, tmp172, tmp173)
tmp175 = tl.where(tmp17, tmp174, tmp22)
tmp176 = tl.full(tmp175.shape, 0.0, tmp175.dtype)
tmp177 = tl.where(tmp169, tmp175, tmp176)
tmp178 = tl.where(tmp9, tmp177, tmp22)
tmp179 = tl.full(tmp178.shape, 0.0, tmp178.dtype)
tmp180 = tl.where(tmp168, tmp178, tmp179)
tmp181 = tmp9 & tmp167
tmp182 = tmp17 & tmp181
tmp183 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp182 & xmask, other=0.0)
tmp184 = tmp22 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp182, tmp184, tmp185)
tmp187 = tl.where(tmp17, tmp186, tmp22)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp181, tmp187, tmp188)
tmp190 = tl.where(tmp9, tmp189, tmp22)
tmp191 = tl.where(tmp17, tmp180, tmp190)
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp167, tmp191, tmp192)
tmp194 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp168 & xmask, other=0.0)
tmp195 = tmp22 + tmp194
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp168, tmp195, tmp196)
tmp198 = tl.where(tmp17, tmp197, tmp22)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp167, tmp198, tmp199)
tmp201 = tl.where(tmp9, tmp200, tmp22)
tmp202 = tl.where(tmp9, tmp193, tmp201)
tmp203 = tl.where(tmp12, tmp166, tmp202)
tmp204 = tl.full(tmp203.shape, 0.0, tmp203.dtype)
tmp205 = tl.where(tmp125, tmp203, tmp204)
tmp206 = tmp17 & tmp125
tmp207 = tmp9 & tmp206
tmp208 = tmp17 & tmp207
tmp209 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp208 & xmask, other=0.0)
tmp210 = tmp22 + tmp209
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp208, tmp210, tmp211)
tmp213 = tl.where(tmp17, tmp212, tmp22)
tmp214 = tl.full(tmp213.shape, 0.0, tmp213.dtype)
tmp215 = tl.where(tmp207, tmp213, tmp214)
tmp216 = tl.where(tmp9, tmp215, tmp22)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp206, tmp216, tmp217)
tmp219 = tl.where(tmp17, tmp218, tmp201)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp125, tmp219, tmp220)
tmp222 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp206 & xmask, other=0.0)
tmp223 = tmp22 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp206, tmp223, tmp224)
tmp226 = tl.where(tmp17, tmp225, tmp22)
tmp227 = tl.full(tmp226.shape, 0.0, tmp226.dtype)
tmp228 = tl.where(tmp125, tmp226, tmp227)
tmp229 = tl.where(tmp9, tmp228, tmp22)
tmp230 = tl.where(tmp9, tmp221, tmp229)
tmp231 = tl.where(tmp9, tmp205, tmp230)
tmp232 = tl.where(tmp12, tmp124, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp236 = tmp121 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp13, tmp236, tmp237)
tmp239 = tl.where(tmp12, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp9, tmp239, tmp240)
tmp242 = tmp17 & tmp9
tmp243 = tmp9 & tmp242
tmp244 = tmp17 & tmp243
tmp245 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp244 & xmask, other=0.0)
tmp246 = tmp22 + tmp245
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp244, tmp246, tmp247)
tmp249 = tl.where(tmp17, tmp248, tmp22)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp243, tmp249, tmp250)
tmp252 = tl.where(tmp9, tmp251, tmp22)
tmp253 = tl.full(tmp252.shape, 0.0, tmp252.dtype)
tmp254 = tl.where(tmp242, tmp252, tmp253)
tmp255 = tl.where(tmp17, tmp254, tmp229)
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp9, tmp255, tmp256)
tmp258 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp242 & xmask, other=0.0)
tmp259 = tmp22 + tmp258
tmp260 = tl.full(tmp259.shape, 0.0, tmp259.dtype)
tmp261 = tl.where(tmp242, tmp259, tmp260)
tmp262 = tl.where(tmp17, tmp261, tmp22)
tmp263 = tl.full(tmp262.shape, 0.0, tmp262.dtype)
tmp264 = tl.where(tmp9, tmp262, tmp263)
tmp265 = tl.where(tmp9, tmp264, tmp22)
tmp266 = tl.where(tmp9, tmp257, tmp265)
tmp267 = tl.where(tmp9, tmp241, tmp266)
tmp268 = tl.where(tmp9, tmp234, tmp267)
tmp269 = tl.where(tmp5, tmp6, tmp268)
tl.store(out_ptr0 + x2, tmp269, xmask)
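# triton_poi_fused_add_zeros_2: the kernel name indicates a fusion of
# aten.add with aten.zeros. It builds one sample slice of the stacked
# logits (xnumel = 256 = 4 * 4 * 16), starting from a zero accumulator
# (tmp18 = 0.0) and adding masked loads from in_ptr1; the deeply nested
# tl.where chains are Inductor's expansion of the cat/stack boundary
# predicates.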
@triton.jit
def triton_poi_fused_add_zeros_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
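    # Index decomposition: x1 = xindex // 16 selects the sample row of the
    # stacked output; x0 = xindex % 16 is the offset within one 4x4 tile.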
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + x2, tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tmp4 >= tmp1
tmp6 = tl.full([1], 8, tl.int64)
tmp7 = tmp4 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp8 & tmp10
tmp12 = tmp2 & tmp11
tmp13 = tmp4 < tmp1
tmp14 = tmp13 & tmp12
tmp15 = tmp2 & tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp18 = 0.0
tmp19 = tmp18 + tmp17
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp16, tmp19, tmp20)
tmp22 = tl.where(tmp13, tmp21, tmp18)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp2, tmp24, tmp18)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tmp2 & tmp12
tmp29 = tmp13 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp29 & xmask, other=0.0)
tmp31 = tmp18 + tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tl.where(tmp13, tmp33, tmp18)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp2, tmp36, tmp18)
tmp38 = tl.where(tmp13, tmp27, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp12, tmp38, tmp39)
tmp41 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp14 & xmask, other=0.0)
tmp42 = tmp18 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp14, tmp42, tmp43)
tmp45 = tl.where(tmp13, tmp44, tmp18)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp12, tmp45, tmp46)
tmp48 = tl.where(tmp2, tmp47, tmp18)
tmp49 = tl.where(tmp2, tmp40, tmp48)
tmp50 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp11, tmp51, tmp52)
tmp54 = tmp2 & tmp10
tmp55 = tmp13 & tmp54
tmp56 = tmp2 & tmp55
tmp57 = tmp13 & tmp56
tmp58 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp57 & xmask, other=0.0)
tmp59 = tmp18 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp57, tmp59, tmp60)
tmp62 = tl.where(tmp13, tmp61, tmp18)
tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
tmp64 = tl.where(tmp56, tmp62, tmp63)
tmp65 = tl.where(tmp2, tmp64, tmp18)
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp55, tmp65, tmp66)
tmp68 = tmp2 & tmp54
tmp69 = tmp13 & tmp68
tmp70 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp69 & xmask, other=0.0)
tmp71 = tmp18 + tmp70
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp69, tmp71, tmp72)
tmp74 = tl.where(tmp13, tmp73, tmp18)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp68, tmp74, tmp75)
tmp77 = tl.where(tmp2, tmp76, tmp18)
tmp78 = tl.where(tmp13, tmp67, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp55 & xmask, other=0.0)
tmp82 = tmp18 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp55, tmp82, tmp83)
tmp85 = tl.where(tmp13, tmp84, tmp18)
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp54, tmp85, tmp86)
tmp88 = tl.where(tmp2, tmp87, tmp18)
tmp89 = tl.where(tmp2, tmp80, tmp88)
tmp90 = tl.where(tmp8, tmp53, tmp89)
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp10, tmp90, tmp91)
tmp93 = tmp13 & tmp10
tmp94 = tmp2 & tmp93
tmp95 = tmp13 & tmp94
tmp96 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp95 & xmask, other=0.0)
tmp97 = tmp18 + tmp96
tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype)
tmp99 = tl.where(tmp95, tmp97, tmp98)
tmp100 = tl.where(tmp13, tmp99, tmp18)
tmp101 = tl.full(tmp100.shape, 0.0, tmp100.dtype)
tmp102 = tl.where(tmp94, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp102, tmp18)
tmp104 = tl.full(tmp103.shape, 0.0, tmp103.dtype)
tmp105 = tl.where(tmp93, tmp103, tmp104)
tmp106 = tl.where(tmp13, tmp105, tmp88)
tmp107 = tl.full(tmp106.shape, 0.0, tmp106.dtype)
tmp108 = tl.where(tmp10, tmp106, tmp107)
tmp109 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp93 & xmask, other=0.0)
tmp110 = tmp18 + tmp109
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp93, tmp110, tmp111)
tmp113 = tl.where(tmp13, tmp112, tmp18)
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp10, tmp113, tmp114)
tmp116 = tl.where(tmp2, tmp115, tmp18)
tmp117 = tl.where(tmp2, tmp108, tmp116)
tmp118 = tl.where(tmp2, tmp92, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp9, tmp118, tmp119)
tmp121 = tmp2 & tmp2
tmp122 = tmp8 & tmp121
tmp123 = tmp2 & tmp122
tmp124 = tmp13 & tmp123
tmp125 = tmp2 & tmp124
tmp126 = tmp13 & tmp125
tmp127 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp128 = tmp18 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp126, tmp128, tmp129)
tmp131 = tl.where(tmp13, tmp130, tmp18)
tmp132 = tl.full(tmp131.shape, 0.0, tmp131.dtype)
tmp133 = tl.where(tmp125, tmp131, tmp132)
tmp134 = tl.where(tmp2, tmp133, tmp18)
tmp135 = tl.full(tmp134.shape, 0.0, tmp134.dtype)
tmp136 = tl.where(tmp124, tmp134, tmp135)
tmp137 = tmp2 & tmp123
tmp138 = tmp13 & tmp137
tmp139 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp138 & xmask, other=0.0)
tmp140 = tmp18 + tmp139
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp138, tmp140, tmp141)
tmp143 = tl.where(tmp13, tmp142, tmp18)
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp137, tmp143, tmp144)
tmp146 = tl.where(tmp2, tmp145, tmp18)
tmp147 = tl.where(tmp13, tmp136, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp123, tmp147, tmp148)
tmp150 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp124 & xmask, other=0.0)
tmp151 = tmp18 + tmp150
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp124, tmp151, tmp152)
tmp154 = tl.where(tmp13, tmp153, tmp18)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp123, tmp154, tmp155)
tmp157 = tl.where(tmp2, tmp156, tmp18)
tmp158 = tl.where(tmp2, tmp149, tmp157)
tmp159 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp122 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp122, tmp160, tmp161)
tmp163 = tmp2 & tmp121
tmp164 = tmp13 & tmp163
tmp165 = tmp2 & tmp164
tmp166 = tmp13 & tmp165
tmp167 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp166 & xmask, other=0.0)
tmp168 = tmp18 + tmp167
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp166, tmp168, tmp169)
tmp171 = tl.where(tmp13, tmp170, tmp18)
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp165, tmp171, tmp172)
tmp174 = tl.where(tmp2, tmp173, tmp18)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp164, tmp174, tmp175)
tmp177 = tmp2 & tmp163
tmp178 = tmp13 & tmp177
tmp179 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp178 & xmask, other=0.0)
tmp180 = tmp18 + tmp179
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp178, tmp180, tmp181)
tmp183 = tl.where(tmp13, tmp182, tmp18)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp177, tmp183, tmp184)
tmp186 = tl.where(tmp2, tmp185, tmp18)
tmp187 = tl.where(tmp13, tmp176, tmp186)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp163, tmp187, tmp188)
tmp190 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp164 & xmask, other=0.0)
tmp191 = tmp18 + tmp190
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp164, tmp191, tmp192)
tmp194 = tl.where(tmp13, tmp193, tmp18)
tmp195 = tl.full(tmp194.shape, 0.0, tmp194.dtype)
tmp196 = tl.where(tmp163, tmp194, tmp195)
tmp197 = tl.where(tmp2, tmp196, tmp18)
tmp198 = tl.where(tmp2, tmp189, tmp197)
tmp199 = tl.where(tmp8, tmp162, tmp198)
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp121, tmp199, tmp200)
tmp202 = tmp13 & tmp121
tmp203 = tmp2 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp204 & xmask, other=0.0)
tmp206 = tmp18 + tmp205
tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype)
tmp208 = tl.where(tmp204, tmp206, tmp207)
tmp209 = tl.where(tmp13, tmp208, tmp18)
tmp210 = tl.full(tmp209.shape, 0.0, tmp209.dtype)
tmp211 = tl.where(tmp203, tmp209, tmp210)
tmp212 = tl.where(tmp2, tmp211, tmp18)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp202, tmp212, tmp213)
tmp215 = tl.where(tmp13, tmp214, tmp197)
tmp216 = tl.full(tmp215.shape, 0.0, tmp215.dtype)
tmp217 = tl.where(tmp121, tmp215, tmp216)
tmp218 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp202 & xmask, other=0.0)
tmp219 = tmp18 + tmp218
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp202, tmp219, tmp220)
tmp222 = tl.where(tmp13, tmp221, tmp18)
tmp223 = tl.full(tmp222.shape, 0.0, tmp222.dtype)
tmp224 = tl.where(tmp121, tmp222, tmp223)
tmp225 = tl.where(tmp2, tmp224, tmp18)
tmp226 = tl.where(tmp2, tmp217, tmp225)
tmp227 = tl.where(tmp2, tmp201, tmp226)
tmp228 = tl.where(tmp8, tmp120, tmp227)
tmp229 = tl.full(tmp228.shape, 0.0, tmp228.dtype)
tmp230 = tl.where(tmp2, tmp228, tmp229)
tmp231 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp232 = tmp117 + tmp231
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.where(tmp8, tmp234, tmp226)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp2, tmp235, tmp236)
tmp238 = tmp13 & tmp2
tmp239 = tmp2 & tmp238
tmp240 = tmp13 & tmp239
tmp241 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp240 & xmask, other=0.0)
tmp242 = tmp18 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp13, tmp244, tmp18)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp2, tmp247, tmp18)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp13, tmp250, tmp225)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp2, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp238 & xmask, other=0.0)
tmp255 = tmp18 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp13, tmp257, tmp18)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp2, tmp258, tmp259)
tmp261 = tl.where(tmp2, tmp260, tmp18)
tmp262 = tl.where(tmp2, tmp253, tmp261)
tmp263 = tl.where(tmp2, tmp237, tmp262)
tmp264 = tl.where(tmp2, tmp230, tmp263)
tmp265 = tl.where(tmp2, tmp3, tmp264)
tmp266 = tmp0 >= tmp1
tmp267 = tmp0 < tmp6
tmp268 = tmp266 & tmp267
tmp269 = tmp13 & tmp268
tmp270 = tmp2 & tmp269
tmp271 = tl.full([1], 12, tl.int64)
tmp272 = tmp4 >= tmp271
tmp273 = tmp272 & tmp270
tmp274 = tmp2 & tmp273
tmp275 = tmp272 & tmp274
tmp276 = tmp2 & tmp275
tmp277 = tmp4 >= tmp6
tmp278 = tmp4 < tmp271
tmp279 = tmp277 & tmp278
tmp281 = tl.where(tmp279, tmp265, tmp265)
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp276, tmp281, tmp282)
tmp284 = tl.where(tmp2, tmp283, tmp265)
tmp285 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp275 & xmask, other=0.0)
tmp286 = tmp284 + tmp285
tmp287 = tl.full(tmp286.shape, 0.0, tmp286.dtype)
tmp288 = tl.where(tmp275, tmp286, tmp287)
tmp289 = tmp2 & tmp274
tmp291 = tl.where(tmp289, tmp281, tmp282)
tmp292 = tl.where(tmp2, tmp291, tmp265)
tmp293 = tl.where(tmp272, tmp288, tmp292)
tmp294 = tl.full(tmp293.shape, 0.0, tmp293.dtype)
tmp295 = tl.where(tmp274, tmp293, tmp294)
tmp297 = tl.where(tmp274, tmp281, tmp282)
tmp298 = tl.where(tmp2, tmp297, tmp265)
tmp299 = tl.where(tmp2, tmp295, tmp298)
tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype)
tmp301 = tl.where(tmp273, tmp299, tmp300)
tmp302 = tmp2 & tmp270
tmp303 = tmp272 & tmp302
tmp304 = tmp2 & tmp303
tmp306 = tl.where(tmp304, tmp281, tmp282)
tmp307 = tl.where(tmp2, tmp306, tmp265)
tmp308 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp303 & xmask, other=0.0)
tmp309 = tmp307 + tmp308
tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype)
tmp311 = tl.where(tmp303, tmp309, tmp310)
tmp312 = tmp2 & tmp302
tmp314 = tl.where(tmp312, tmp281, tmp282)
tmp315 = tl.where(tmp2, tmp314, tmp265)
tmp316 = tl.where(tmp272, tmp311, tmp315)
tmp317 = tl.full(tmp316.shape, 0.0, tmp316.dtype)
tmp318 = tl.where(tmp302, tmp316, tmp317)
tmp320 = tl.where(tmp302, tmp281, tmp282)
tmp321 = tl.where(tmp2, tmp320, tmp265)
tmp322 = tl.where(tmp2, tmp318, tmp321)
tmp323 = tl.where(tmp272, tmp301, tmp322)
tmp324 = tl.full(tmp323.shape, 0.0, tmp323.dtype)
tmp325 = tl.where(tmp270, tmp323, tmp324)
tmp326 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp273 & xmask, other=0.0)
tmp327 = tmp298 + tmp326
tmp328 = tl.full(tmp327.shape, 0.0, tmp327.dtype)
tmp329 = tl.where(tmp273, tmp327, tmp328)
tmp330 = tl.where(tmp272, tmp329, tmp321)
tmp331 = tl.full(tmp330.shape, 0.0, tmp330.dtype)
tmp332 = tl.where(tmp270, tmp330, tmp331)
tmp334 = tl.where(tmp270, tmp281, tmp282)
tmp335 = tl.where(tmp2, tmp334, tmp265)
tmp336 = tl.where(tmp2, tmp332, tmp335)
tmp337 = tl.where(tmp2, tmp325, tmp336)
tmp338 = tl.load(in_ptr1 + (48 + x0 + 4 * x1), tmp269 & xmask, other=0.0)
tmp339 = tmp337 + tmp338
tmp340 = tl.full(tmp339.shape, 0.0, tmp339.dtype)
tmp341 = tl.where(tmp269, tmp339, tmp340)
tmp342 = tmp2 & tmp268
tmp343 = tmp272 & tmp342
tmp344 = tmp2 & tmp343
tmp345 = tmp272 & tmp344
tmp346 = tmp2 & tmp345
tmp348 = tl.where(tmp346, tmp281, tmp282)
tmp349 = tl.where(tmp2, tmp348, tmp265)
tmp350 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp345 & xmask, other=0.0)
tmp351 = tmp349 + tmp350
tmp352 = tl.full(tmp351.shape, 0.0, tmp351.dtype)
tmp353 = tl.where(tmp345, tmp351, tmp352)
tmp354 = tmp2 & tmp344
tmp356 = tl.where(tmp354, tmp281, tmp282)
tmp357 = tl.where(tmp2, tmp356, tmp265)
tmp358 = tl.where(tmp272, tmp353, tmp357)
tmp359 = tl.full(tmp358.shape, 0.0, tmp358.dtype)
tmp360 = tl.where(tmp344, tmp358, tmp359)
tmp362 = tl.where(tmp344, tmp281, tmp282)
tmp363 = tl.where(tmp2, tmp362, tmp265)
tmp364 = tl.where(tmp2, tmp360, tmp363)
tmp365 = tl.full(tmp364.shape, 0.0, tmp364.dtype)
tmp366 = tl.where(tmp343, tmp364, tmp365)
tmp367 = tmp2 & tmp342
tmp368 = tmp272 & tmp367
tmp369 = tmp2 & tmp368
tmp371 = tl.where(tmp369, tmp281, tmp282)
tmp372 = tl.where(tmp2, tmp371, tmp265)
tmp373 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp368 & xmask, other=0.0)
tmp374 = tmp372 + tmp373
tmp375 = tl.full(tmp374.shape, 0.0, tmp374.dtype)
tmp376 = tl.where(tmp368, tmp374, tmp375)
tmp377 = tmp2 & tmp367
tmp379 = tl.where(tmp377, tmp281, tmp282)
tmp380 = tl.where(tmp2, tmp379, tmp265)
tmp381 = tl.where(tmp272, tmp376, tmp380)
tmp382 = tl.full(tmp381.shape, 0.0, tmp381.dtype)
tmp383 = tl.where(tmp367, tmp381, tmp382)
tmp385 = tl.where(tmp367, tmp281, tmp282)
tmp386 = tl.where(tmp2, tmp385, tmp265)
tmp387 = tl.where(tmp2, tmp383, tmp386)
tmp388 = tl.where(tmp272, tmp366, tmp387)
tmp389 = tl.full(tmp388.shape, 0.0, tmp388.dtype)
tmp390 = tl.where(tmp342, tmp388, tmp389)
tmp391 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp343 & xmask, other=0.0)
tmp392 = tmp363 + tmp391
tmp393 = tl.full(tmp392.shape, 0.0, tmp392.dtype)
tmp394 = tl.where(tmp343, tmp392, tmp393)
tmp395 = tl.where(tmp272, tmp394, tmp386)
tmp396 = tl.full(tmp395.shape, 0.0, tmp395.dtype)
tmp397 = tl.where(tmp342, tmp395, tmp396)
tmp399 = tl.where(tmp342, tmp281, tmp282)
tmp400 = tl.where(tmp2, tmp399, tmp265)
tmp401 = tl.where(tmp2, tmp397, tmp400)
tmp402 = tl.where(tmp2, tmp390, tmp401)
tmp403 = tl.where(tmp13, tmp341, tmp402)
tmp404 = tl.full(tmp403.shape, 0.0, tmp403.dtype)
tmp405 = tl.where(tmp268, tmp403, tmp404)
tmp406 = tmp272 & tmp2
tmp407 = tmp2 & tmp406
tmp408 = tmp272 & tmp407
tmp409 = tmp2 & tmp408
tmp411 = tl.where(tmp409, tmp281, tmp282)
tmp412 = tl.where(tmp2, tmp411, tmp265)
tmp413 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp408 & xmask, other=0.0)
tmp414 = tmp412 + tmp413
tmp415 = tl.full(tmp414.shape, 0.0, tmp414.dtype)
tmp416 = tl.where(tmp408, tmp414, tmp415)
tmp417 = tmp2 & tmp407
tmp419 = tl.where(tmp417, tmp281, tmp282)
tmp420 = tl.where(tmp2, tmp419, tmp265)
tmp421 = tl.where(tmp272, tmp416, tmp420)
tmp422 = tl.full(tmp421.shape, 0.0, tmp421.dtype)
tmp423 = tl.where(tmp407, tmp421, tmp422)
tmp425 = tl.where(tmp407, tmp281, tmp282)
tmp426 = tl.where(tmp2, tmp425, tmp265)
tmp427 = tl.where(tmp2, tmp423, tmp426)
tmp428 = tl.full(tmp427.shape, 0.0, tmp427.dtype)
tmp429 = tl.where(tmp406, tmp427, tmp428)
tmp430 = tmp272 & tmp121
tmp431 = tmp2 & tmp430
tmp433 = tl.where(tmp431, tmp281, tmp282)
tmp434 = tl.where(tmp2, tmp433, tmp265)
tmp435 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp430 & xmask, other=0.0)
tmp436 = tmp434 + tmp435
tmp437 = tl.full(tmp436.shape, 0.0, tmp436.dtype)
tmp438 = tl.where(tmp430, tmp436, tmp437)
tmp440 = tl.where(tmp163, tmp281, tmp282)
tmp441 = tl.where(tmp2, tmp440, tmp265)
tmp442 = tl.where(tmp272, tmp438, tmp441)
tmp443 = tl.full(tmp442.shape, 0.0, tmp442.dtype)
tmp444 = tl.where(tmp121, tmp442, tmp443)
tmp446 = tl.where(tmp121, tmp281, tmp282)
tmp447 = tl.where(tmp2, tmp446, tmp265)
tmp448 = tl.where(tmp2, tmp444, tmp447)
tmp449 = tl.where(tmp272, tmp429, tmp448)
tmp450 = tl.full(tmp449.shape, 0.0, tmp449.dtype)
tmp451 = tl.where(tmp2, tmp449, tmp450)
tmp452 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp406 & xmask, other=0.0)
tmp453 = tmp426 + tmp452
tmp454 = tl.full(tmp453.shape, 0.0, tmp453.dtype)
tmp455 = tl.where(tmp406, tmp453, tmp454)
tmp456 = tl.where(tmp272, tmp455, tmp447)
tmp457 = tl.full(tmp456.shape, 0.0, tmp456.dtype)
tmp458 = tl.where(tmp2, tmp456, tmp457)
tmp460 = tl.where(tmp2, tmp281, tmp282)
tmp461 = tl.where(tmp2, tmp460, tmp265)
tmp462 = tl.where(tmp2, tmp458, tmp461)
tmp463 = tl.where(tmp2, tmp451, tmp462)
tmp464 = tl.where(tmp268, tmp405, tmp463)
tl.store(in_out_ptr0 + x2, tmp464, xmask)
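# triton_poi_fused_add_3: per its name, a fused aten.add over the second
# 64-element sample slice (loads at in_ptr0 + 64 + x2); it combines the
# previously accumulated logits with classifier terms from in_ptr1 and
# writes two views of the result (out_ptr0 and out_ptr1).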
@triton.jit
def triton_poi_fused_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp200 = tl.load(in_ptr0 + (64 + x2), xmask)
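    # Unconditional full-tile load of the second sample slice; reused below
    # as the fallback operand of the masked tl.where chains.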
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 4 + x1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp6 >= tmp7
tmp9 = tmp6 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp5
tmp12 = tmp0 >= tmp7
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp11
tmp16 = tmp10 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp10 & tmp17
tmp19 = tmp0 < tmp7
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr0 + (64 + x2), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr0 + (64 + x2), tmp18 & xmask, other=0.0)
tmp23 = tl.where(tmp19, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp18, tmp23, tmp24)
tmp26 = tl.load(in_ptr0 + (64 + x2), tmp17 & xmask, other=0.0)
tmp27 = tl.where(tmp10, tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp29 = tmp27 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp17, tmp29, tmp30)
tmp32 = tmp10 & tmp16
tmp33 = tmp19 & tmp32
tmp34 = tl.load(in_ptr0 + (64 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr0 + (64 + x2), tmp32 & xmask, other=0.0)
tmp36 = tl.where(tmp19, tmp34, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp32, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (64 + x2), tmp16 & xmask, other=0.0)
tmp40 = tl.where(tmp10, tmp38, tmp39)
tmp41 = tl.where(tmp14, tmp31, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp16, tmp41, tmp42)
tmp44 = tmp19 & tmp16
tmp45 = tl.load(in_ptr0 + (64 + x2), tmp44 & xmask, other=0.0)
tmp46 = tl.where(tmp19, tmp45, tmp39)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp16, tmp46, tmp47)
tmp49 = tl.load(in_ptr0 + (64 + x2), tmp15 & xmask, other=0.0)
tmp50 = tl.where(tmp10, tmp48, tmp49)
tmp51 = tl.where(tmp10, tmp43, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp15, tmp51, tmp52)
tmp54 = tmp10 & tmp11
tmp55 = tmp14 & tmp54
tmp56 = tmp10 & tmp55
tmp57 = tmp19 & tmp56
tmp58 = tl.load(in_ptr0 + (64 + x2), tmp57 & xmask, other=0.0)
tmp59 = tl.load(in_ptr0 + (64 + x2), tmp56 & xmask, other=0.0)
tmp60 = tl.where(tmp19, tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.load(in_ptr0 + (64 + x2), tmp55 & xmask, other=0.0)
tmp64 = tl.where(tmp10, tmp62, tmp63)
tmp65 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp55 & xmask, other=0.0)
tmp66 = tmp64 + tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp55, tmp66, tmp67)
tmp69 = tmp10 & tmp54
tmp70 = tmp19 & tmp69
tmp71 = tl.load(in_ptr0 + (64 + x2), tmp70 & xmask, other=0.0)
tmp72 = tl.load(in_ptr0 + (64 + x2), tmp69 & xmask, other=0.0)
tmp73 = tl.where(tmp19, tmp71, tmp72)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp69, tmp73, tmp74)
tmp76 = tl.load(in_ptr0 + (64 + x2), tmp54 & xmask, other=0.0)
tmp77 = tl.where(tmp10, tmp75, tmp76)
tmp78 = tl.where(tmp14, tmp68, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tmp19 & tmp54
tmp82 = tl.load(in_ptr0 + (64 + x2), tmp81 & xmask, other=0.0)
tmp83 = tl.where(tmp19, tmp82, tmp76)
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp54, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (64 + x2), tmp11 & xmask, other=0.0)
tmp87 = tl.where(tmp10, tmp85, tmp86)
tmp88 = tl.where(tmp10, tmp80, tmp87)
tmp89 = tl.where(tmp14, tmp53, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp11, tmp89, tmp90)
tmp92 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp93 = tmp50 + tmp92
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp15, tmp93, tmp94)
tmp96 = tl.where(tmp14, tmp95, tmp87)
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp11, tmp96, tmp97)
tmp99 = tmp19 & tmp11
tmp100 = tl.load(in_ptr0 + (64 + x2), tmp99 & xmask, other=0.0)
tmp101 = tl.where(tmp19, tmp100, tmp86)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp11, tmp101, tmp102)
tmp104 = tl.load(in_ptr0 + (64 + x2), tmp5 & xmask, other=0.0)
tmp105 = tl.where(tmp10, tmp103, tmp104)
tmp106 = tl.where(tmp10, tmp98, tmp105)
tmp107 = tl.where(tmp10, tmp91, tmp106)
tmp108 = tl.load(in_ptr1 + (88 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tmp109 = tmp107 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp5, tmp109, tmp110)
tmp112 = tmp14 & tmp10
tmp113 = tmp10 & tmp112
tmp114 = tmp14 & tmp113
tmp115 = tmp10 & tmp114
tmp116 = tmp19 & tmp115
tmp117 = tl.load(in_ptr0 + (64 + x2), tmp116 & xmask, other=0.0)
tmp118 = tl.load(in_ptr0 + (64 + x2), tmp115 & xmask, other=0.0)
tmp119 = tl.where(tmp19, tmp117, tmp118)
tmp120 = tl.full(tmp119.shape, 0.0, tmp119.dtype)
tmp121 = tl.where(tmp115, tmp119, tmp120)
tmp122 = tl.load(in_ptr0 + (64 + x2), tmp114 & xmask, other=0.0)
tmp123 = tl.where(tmp10, tmp121, tmp122)
tmp124 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp114 & xmask, other=0.0)
tmp125 = tmp123 + tmp124
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp114, tmp125, tmp126)
tmp128 = tmp10 & tmp113
tmp129 = tmp19 & tmp128
tmp130 = tl.load(in_ptr0 + (64 + x2), tmp129 & xmask, other=0.0)
tmp131 = tl.load(in_ptr0 + (64 + x2), tmp128 & xmask, other=0.0)
tmp132 = tl.where(tmp19, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp128, tmp132, tmp133)
tmp135 = tl.load(in_ptr0 + (64 + x2), tmp113 & xmask, other=0.0)
tmp136 = tl.where(tmp10, tmp134, tmp135)
tmp137 = tl.where(tmp14, tmp127, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp113, tmp137, tmp138)
tmp140 = tmp19 & tmp113
tmp141 = tl.load(in_ptr0 + (64 + x2), tmp140 & xmask, other=0.0)
tmp142 = tl.where(tmp19, tmp141, tmp135)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp113, tmp142, tmp143)
tmp145 = tl.load(in_ptr0 + (64 + x2), tmp112 & xmask, other=0.0)
tmp146 = tl.where(tmp10, tmp144, tmp145)
tmp147 = tl.where(tmp10, tmp139, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp112, tmp147, tmp148)
tmp150 = tmp10 & tmp10
tmp151 = tmp14 & tmp150
tmp152 = tmp10 & tmp151
tmp153 = tmp19 & tmp152
tmp154 = tl.load(in_ptr0 + (64 + x2), tmp153 & xmask, other=0.0)
tmp155 = tl.load(in_ptr0 + (64 + x2), tmp152 & xmask, other=0.0)
tmp156 = tl.where(tmp19, tmp154, tmp155)
tmp157 = tl.full(tmp156.shape, 0.0, tmp156.dtype)
tmp158 = tl.where(tmp152, tmp156, tmp157)
tmp159 = tl.load(in_ptr0 + (64 + x2), tmp151 & xmask, other=0.0)
tmp160 = tl.where(tmp10, tmp158, tmp159)
tmp161 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp151 & xmask, other=0.0)
tmp162 = tmp160 + tmp161
tmp163 = tl.full(tmp162.shape, 0.0, tmp162.dtype)
tmp164 = tl.where(tmp151, tmp162, tmp163)
tmp165 = tmp10 & tmp150
tmp166 = tmp19 & tmp165
tmp167 = tl.load(in_ptr0 + (64 + x2), tmp166 & xmask, other=0.0)
tmp168 = tl.load(in_ptr0 + (64 + x2), tmp165 & xmask, other=0.0)
tmp169 = tl.where(tmp19, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp165, tmp169, tmp170)
tmp172 = tl.load(in_ptr0 + (64 + x2), tmp150 & xmask, other=0.0)
tmp173 = tl.where(tmp10, tmp171, tmp172)
tmp174 = tl.where(tmp14, tmp164, tmp173)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp150, tmp174, tmp175)
tmp177 = tmp19 & tmp150
tmp178 = tl.load(in_ptr0 + (64 + x2), tmp177 & xmask, other=0.0)
tmp179 = tl.where(tmp19, tmp178, tmp172)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp150, tmp179, tmp180)
tmp182 = tl.load(in_ptr0 + (64 + x2), tmp10 & xmask, other=0.0)
tmp183 = tl.where(tmp10, tmp181, tmp182)
tmp184 = tl.where(tmp10, tmp176, tmp183)
tmp185 = tl.where(tmp14, tmp149, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp10, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp112 & xmask, other=0.0)
tmp189 = tmp146 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp112, tmp189, tmp190)
tmp192 = tl.where(tmp14, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp10, tmp192, tmp193)
tmp195 = tmp19 & tmp10
tmp196 = tl.load(in_ptr0 + (64 + x2), tmp195 & xmask, other=0.0)
tmp197 = tl.where(tmp19, tmp196, tmp182)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp10, tmp197, tmp198)
tmp201 = tl.where(tmp10, tmp199, tmp200)
tmp202 = tl.where(tmp10, tmp194, tmp201)
tmp203 = tl.where(tmp10, tmp187, tmp202)
tmp204 = tl.where(tmp5, tmp111, tmp203)
tmp205 = tl.where(tmp10, tmp204, tmp107)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp5, tmp205, tmp206)
tmp208 = tl.where(tmp10, tmp204, tmp203)
tmp209 = tl.where(tmp5, tmp207, tmp208)
tl.store(out_ptr0 + x2, tmp204, xmask)
tl.store(out_ptr1 + x2, tmp209, xmask)
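# triton_poi_fused_add_4: an in-place aten.add fusion (in_out_ptr0) over the
# full 256-element buffer; the row predicate on x1 (4 <= x1 < 8) restricts
# the update to the second sample's rows, reading in_ptr0 and in_ptr1 at
# offset -64 relative to x2.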
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp101 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-64 + x2), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (-64 + x2), tmp5 & xmask, other=0.0)
tmp8 = x0
tmp9 = tmp8 >= tmp1
tmp10 = tmp8 < tmp3
tmp11 = tmp9 & tmp10
tmp12 = tmp11 & tmp5
tmp13 = tmp5 & tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp5 & tmp14
tmp16 = tmp8 < tmp1
tmp17 = tmp16 & tmp15
tmp18 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_out_ptr0 + x2, tmp15 & xmask, other=0.0)
tmp20 = tl.where(tmp16, tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp15, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp24 = tl.where(tmp5, tmp22, tmp23)
tmp25 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp14 & xmask, other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp14, tmp26, tmp27)
tmp29 = tmp5 & tmp13
tmp30 = tmp16 & tmp29
tmp31 = tl.load(in_out_ptr0 + x2, tmp30 & xmask, other=0.0)
tmp32 = tl.load(in_out_ptr0 + x2, tmp29 & xmask, other=0.0)
tmp33 = tl.where(tmp16, tmp31, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp29, tmp33, tmp34)
tmp36 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp37 = tl.where(tmp5, tmp35, tmp36)
tmp38 = tl.where(tmp11, tmp28, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp13, tmp38, tmp39)
tmp41 = tmp16 & tmp13
tmp42 = tl.load(in_out_ptr0 + x2, tmp41 & xmask, other=0.0)
tmp43 = tl.where(tmp16, tmp42, tmp36)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp13, tmp43, tmp44)
tmp46 = tl.load(in_out_ptr0 + x2, tmp12 & xmask, other=0.0)
tmp47 = tl.where(tmp5, tmp45, tmp46)
tmp48 = tl.where(tmp5, tmp40, tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp12, tmp48, tmp49)
tmp51 = tmp5 & tmp5
tmp52 = tmp11 & tmp51
tmp53 = tmp5 & tmp52
tmp54 = tmp16 & tmp53
tmp55 = tl.load(in_out_ptr0 + x2, tmp54 & xmask, other=0.0)
tmp56 = tl.load(in_out_ptr0 + x2, tmp53 & xmask, other=0.0)
tmp57 = tl.where(tmp16, tmp55, tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp53, tmp57, tmp58)
tmp60 = tl.load(in_out_ptr0 + x2, tmp52 & xmask, other=0.0)
tmp61 = tl.where(tmp5, tmp59, tmp60)
tmp62 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp52 & xmask, other=0.0)
tmp63 = tmp61 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp52, tmp63, tmp64)
tmp66 = tmp5 & tmp51
tmp67 = tmp16 & tmp66
tmp68 = tl.load(in_out_ptr0 + x2, tmp67 & xmask, other=0.0)
tmp69 = tl.load(in_out_ptr0 + x2, tmp66 & xmask, other=0.0)
tmp70 = tl.where(tmp16, tmp68, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp66, tmp70, tmp71)
tmp73 = tl.load(in_out_ptr0 + x2, tmp51 & xmask, other=0.0)
tmp74 = tl.where(tmp5, tmp72, tmp73)
tmp75 = tl.where(tmp11, tmp65, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp51, tmp75, tmp76)
tmp78 = tmp16 & tmp51
tmp79 = tl.load(in_out_ptr0 + x2, tmp78 & xmask, other=0.0)
tmp80 = tl.where(tmp16, tmp79, tmp73)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp51, tmp80, tmp81)
tmp83 = tl.load(in_out_ptr0 + x2, tmp5 & xmask, other=0.0)
tmp84 = tl.where(tmp5, tmp82, tmp83)
tmp85 = tl.where(tmp5, tmp77, tmp84)
tmp86 = tl.where(tmp11, tmp50, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp5, tmp86, tmp87)
tmp89 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp12 & xmask, other=0.0)
tmp90 = tmp47 + tmp89
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp12, tmp90, tmp91)
tmp93 = tl.where(tmp11, tmp92, tmp84)
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp5, tmp93, tmp94)
tmp96 = tmp16 & tmp5
tmp97 = tl.load(in_out_ptr0 + x2, tmp96 & xmask, other=0.0)
tmp98 = tl.where(tmp16, tmp97, tmp83)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp5, tmp98, tmp99)
tmp102 = tl.where(tmp5, tmp100, tmp101)
tmp103 = tl.where(tmp5, tmp95, tmp102)
tmp104 = tl.where(tmp5, tmp88, tmp103)
tmp105 = tl.where(tmp5, tmp7, tmp104)
tmp106 = tl.where(tmp5, tmp6, tmp105)
tl.store(in_out_ptr0 + x2, tmp106, xmask)
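# triton_poi_fused_5: an unnamed pointwise fusion over the third sample
# slice (loads from in_ptr0 at offset 128 = 2 * 64); it gathers the
# accumulated logits and the in_ptr1 classifier additions into out_ptr0
# for 64 elements.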
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp287 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp3 >= tmp1
tmp12 = tmp3 < tmp4
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp0 >= tmp6
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr0 + (128 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (128 + x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_ptr0 + (128 + x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_ptr0 + (128 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (128 + x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (128 + x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (128 + x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (128 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp9
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_ptr0 + (128 + x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_ptr0 + (128 + x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_ptr0 + (128 + x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_ptr0 + (128 + x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (128 + x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_ptr0 + (128 + x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (128 + x2), tmp9 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp2, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tmp13 & tmp2
tmp109 = tmp15 & tmp108
tmp110 = tmp13 & tmp109
tmp111 = tmp15 & tmp110
tmp112 = tl.load(in_ptr0 + (128 + x2), tmp111 & xmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp111 & xmask, other=0.0)
tmp114 = tmp112 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp111, tmp114, tmp115)
tmp117 = tl.load(in_ptr0 + (128 + x2), tmp110 & xmask, other=0.0)
tmp118 = tl.where(tmp15, tmp116, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp110, tmp118, tmp119)
tmp121 = tl.load(in_ptr0 + (128 + x2), tmp109 & xmask, other=0.0)
tmp122 = tl.where(tmp13, tmp120, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp109, tmp122, tmp123)
tmp125 = tmp13 & tmp108
tmp126 = tmp15 & tmp125
tmp127 = tl.load(in_ptr0 + (128 + x2), tmp126 & xmask, other=0.0)
tmp128 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp129 = tmp127 + tmp128
tmp130 = tl.full(tmp129.shape, 0.0, tmp129.dtype)
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.load(in_ptr0 + (128 + x2), tmp125 & xmask, other=0.0)
tmp133 = tl.where(tmp15, tmp131, tmp132)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp125, tmp133, tmp134)
tmp136 = tl.load(in_ptr0 + (128 + x2), tmp108 & xmask, other=0.0)
tmp137 = tl.where(tmp13, tmp135, tmp136)
tmp138 = tl.where(tmp15, tmp124, tmp137)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp108, tmp138, tmp139)
tmp141 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp142 = tmp121 + tmp141
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp109, tmp142, tmp143)
tmp145 = tl.where(tmp15, tmp144, tmp136)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp108, tmp145, tmp146)
tmp148 = tl.load(in_ptr0 + (128 + x2), tmp2 & xmask, other=0.0)
tmp149 = tl.where(tmp13, tmp147, tmp148)
tmp150 = tl.where(tmp13, tmp140, tmp149)
tmp151 = tl.where(tmp8, tmp107, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp2, tmp151, tmp152)
tmp154 = tmp2 & tmp8
tmp155 = tmp13 & tmp154
tmp156 = tmp15 & tmp155
tmp157 = tmp13 & tmp156
tmp158 = tmp15 & tmp157
tmp159 = tl.load(in_ptr0 + (128 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (128 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp15, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (128 + x2), tmp156 & xmask, other=0.0)
tmp169 = tl.where(tmp13, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp156, tmp169, tmp170)
tmp172 = tmp13 & tmp155
tmp173 = tmp15 & tmp172
tmp174 = tl.load(in_ptr0 + (128 + x2), tmp173 & xmask, other=0.0)
tmp175 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp173 & xmask, other=0.0)
tmp176 = tmp174 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp173, tmp176, tmp177)
tmp179 = tl.load(in_ptr0 + (128 + x2), tmp172 & xmask, other=0.0)
tmp180 = tl.where(tmp15, tmp178, tmp179)
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp172, tmp180, tmp181)
tmp183 = tl.load(in_ptr0 + (128 + x2), tmp155 & xmask, other=0.0)
tmp184 = tl.where(tmp13, tmp182, tmp183)
tmp185 = tl.where(tmp15, tmp171, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp155, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp156 & xmask, other=0.0)
tmp189 = tmp168 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp156, tmp189, tmp190)
tmp192 = tl.where(tmp15, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp155, tmp192, tmp193)
tmp195 = tl.load(in_ptr0 + (128 + x2), tmp154 & xmask, other=0.0)
tmp196 = tl.where(tmp13, tmp194, tmp195)
tmp197 = tl.where(tmp13, tmp187, tmp196)
tmp198 = tl.load(in_ptr1 + (128 + x0 + 4 * x1), tmp154 & xmask, other=0.0)
tmp199 = tmp197 + tmp198
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp154, tmp199, tmp200)
tmp202 = tmp13 & tmp8
tmp203 = tmp15 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tmp15 & tmp204
tmp206 = tl.load(in_ptr0 + (128 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (128 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp15, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (128 + x2), tmp203 & xmask, other=0.0)
tmp216 = tl.where(tmp13, tmp214, tmp215)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp203, tmp216, tmp217)
tmp219 = tmp13 & tmp202
tmp220 = tmp15 & tmp219
tmp221 = tl.load(in_ptr0 + (128 + x2), tmp220 & xmask, other=0.0)
tmp222 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp220 & xmask, other=0.0)
tmp223 = tmp221 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp220, tmp223, tmp224)
tmp226 = tl.load(in_ptr0 + (128 + x2), tmp219 & xmask, other=0.0)
tmp227 = tl.where(tmp15, tmp225, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp219, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (128 + x2), tmp202 & xmask, other=0.0)
tmp231 = tl.where(tmp13, tmp229, tmp230)
tmp232 = tl.where(tmp15, tmp218, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp202, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp203 & xmask, other=0.0)
tmp236 = tmp215 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp203, tmp236, tmp237)
tmp239 = tl.where(tmp15, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp202, tmp239, tmp240)
tmp242 = tl.load(in_ptr0 + (128 + x2), tmp8 & xmask, other=0.0)
tmp243 = tl.where(tmp13, tmp241, tmp242)
tmp244 = tl.where(tmp13, tmp234, tmp243)
tmp245 = tl.where(tmp2, tmp201, tmp244)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp8, tmp245, tmp246)
tmp248 = tmp15 & tmp13
tmp249 = tmp13 & tmp248
tmp250 = tmp15 & tmp249
tmp251 = tl.load(in_ptr0 + (128 + x2), tmp250 & xmask, other=0.0)
tmp252 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp250 & xmask, other=0.0)
tmp253 = tmp251 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp250, tmp253, tmp254)
tmp256 = tl.load(in_ptr0 + (128 + x2), tmp249 & xmask, other=0.0)
tmp257 = tl.where(tmp15, tmp255, tmp256)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp249, tmp257, tmp258)
tmp260 = tl.load(in_ptr0 + (128 + x2), tmp248 & xmask, other=0.0)
tmp261 = tl.where(tmp13, tmp259, tmp260)
tmp262 = tl.full(tmp261.shape, 0.0, tmp261.dtype)
tmp263 = tl.where(tmp248, tmp261, tmp262)
tmp264 = tmp13 & tmp13
tmp265 = tmp15 & tmp264
tmp266 = tl.load(in_ptr0 + (128 + x2), tmp265 & xmask, other=0.0)
tmp267 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp265 & xmask, other=0.0)
tmp268 = tmp266 + tmp267
tmp269 = tl.full(tmp268.shape, 0.0, tmp268.dtype)
tmp270 = tl.where(tmp265, tmp268, tmp269)
tmp271 = tl.load(in_ptr0 + (128 + x2), tmp264 & xmask, other=0.0)
tmp272 = tl.where(tmp15, tmp270, tmp271)
tmp273 = tl.full(tmp272.shape, 0.0, tmp272.dtype)
tmp274 = tl.where(tmp264, tmp272, tmp273)
tmp275 = tl.load(in_ptr0 + (128 + x2), tmp13 & xmask, other=0.0)
tmp276 = tl.where(tmp13, tmp274, tmp275)
tmp277 = tl.where(tmp15, tmp263, tmp276)
tmp278 = tl.full(tmp277.shape, 0.0, tmp277.dtype)
tmp279 = tl.where(tmp13, tmp277, tmp278)
tmp280 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp248 & xmask, other=0.0)
tmp281 = tmp260 + tmp280
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp248, tmp281, tmp282)
tmp284 = tl.where(tmp15, tmp283, tmp275)
tmp285 = tl.full(tmp284.shape, 0.0, tmp284.dtype)
tmp286 = tl.where(tmp13, tmp284, tmp285)
tmp288 = tl.where(tmp13, tmp286, tmp287)
tmp289 = tl.where(tmp13, tmp279, tmp288)
tmp290 = tl.where(tmp8, tmp247, tmp289)
tmp291 = tl.where(tmp2, tmp153, tmp290)
tl.store(out_ptr0 + x2, tmp291, xmask)
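# triton_poi_fused_add_6: in-place add over the 256-element accumulator,
# updating the 8 <= x1 < 12 sample rows; it reads in_ptr0 at offset -128
# relative to x2 plus further in_ptr1 classifier terms.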
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp147 = tl.load(in_out_ptr0 + x2, xmask)
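    # Current accumulator value, loaded unmasked; serves as the default in
    # the tl.where selection chains below.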
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-128 + x2), tmp5 & xmask, other=0.0)
tmp7 = x0
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp9 & tmp5
tmp11 = tmp0 >= tmp8
tmp12 = tmp0 < tmp1
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp7 >= tmp3
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_out_ptr0 + x2, tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_out_ptr0 + x2, tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_out_ptr0 + x2, tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_out_ptr0 + x2, tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_out_ptr0 + x2, tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (96 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp5
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_out_ptr0 + x2, tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_out_ptr0 + x2, tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_out_ptr0 + x2, tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_out_ptr0 + x2, tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_out_ptr0 + x2, tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_out_ptr0 + x2, tmp5 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp9, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp5, tmp105, tmp106)
tmp108 = tmp15 & tmp13
tmp109 = tmp13 & tmp108
tmp110 = tmp15 & tmp109
tmp111 = tl.load(in_out_ptr0 + x2, tmp110 & xmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp110 & xmask, other=0.0)
tmp113 = tmp111 + tmp112
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.load(in_out_ptr0 + x2, tmp109 & xmask, other=0.0)
tmp117 = tl.where(tmp15, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp109, tmp117, tmp118)
tmp120 = tl.load(in_out_ptr0 + x2, tmp108 & xmask, other=0.0)
tmp121 = tl.where(tmp13, tmp119, tmp120)
tmp122 = tl.full(tmp121.shape, 0.0, tmp121.dtype)
tmp123 = tl.where(tmp108, tmp121, tmp122)
tmp124 = tmp13 & tmp13
tmp125 = tmp15 & tmp124
tmp126 = tl.load(in_out_ptr0 + x2, tmp125 & xmask, other=0.0)
tmp127 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp125 & xmask, other=0.0)
tmp128 = tmp126 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp125, tmp128, tmp129)
tmp131 = tl.load(in_out_ptr0 + x2, tmp124 & xmask, other=0.0)
tmp132 = tl.where(tmp15, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp124, tmp132, tmp133)
tmp135 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp136 = tl.where(tmp13, tmp134, tmp135)
tmp137 = tl.where(tmp15, tmp123, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp13, tmp137, tmp138)
tmp140 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp108 & xmask, other=0.0)
tmp141 = tmp120 + tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp108, tmp141, tmp142)
tmp144 = tl.where(tmp15, tmp143, tmp135)
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp13, tmp144, tmp145)
tmp148 = tl.where(tmp13, tmp146, tmp147)
tmp149 = tl.where(tmp13, tmp139, tmp148)
tmp150 = tl.where(tmp5, tmp107, tmp149)
tmp151 = tl.where(tmp5, tmp6, tmp150)
tmp152 = tmp7 >= tmp1
tmp153 = tmp7 < tmp3
tmp154 = tmp152 & tmp153
tmp155 = tmp154 & tmp5
tmp156 = tmp5 & tmp155
tmp157 = tmp7 >= tmp8
tmp158 = tmp7 < tmp1
tmp159 = tmp157 & tmp158
tmp160 = tmp159 & tmp156
tmp161 = tmp5 & tmp160
tmp162 = tmp159 & tmp161
tmp163 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp162 & xmask, other=0.0)
tmp164 = tmp151 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp162, tmp164, tmp165)
tmp167 = tl.where(tmp159, tmp166, tmp151)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp161, tmp167, tmp168)
tmp170 = tl.where(tmp5, tmp169, tmp151)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp160, tmp170, tmp171)
tmp173 = tmp5 & tmp156
tmp174 = tmp159 & tmp173
tmp175 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp174 & xmask, other=0.0)
tmp176 = tmp151 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp174, tmp176, tmp177)
tmp179 = tl.where(tmp159, tmp178, tmp151)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp173, tmp179, tmp180)
tmp182 = tl.where(tmp5, tmp181, tmp151)
tmp183 = tl.where(tmp159, tmp172, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp156, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp160 & xmask, other=0.0)
tmp187 = tmp151 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp160, tmp187, tmp188)
tmp190 = tl.where(tmp159, tmp189, tmp151)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp156, tmp190, tmp191)
tmp193 = tl.where(tmp5, tmp192, tmp151)
tmp194 = tl.where(tmp5, tmp185, tmp193)
tmp195 = tl.load(in_ptr1 + (120 + x0 + 4 * x1), tmp155 & xmask, other=0.0)
tmp196 = tmp194 + tmp195
tmp197 = tl.full(tmp196.shape, 0.0, tmp196.dtype)
tmp198 = tl.where(tmp155, tmp196, tmp197)
tmp199 = tmp5 & tmp5
tmp200 = tmp159 & tmp199
tmp201 = tmp5 & tmp200
tmp202 = tmp159 & tmp201
tmp203 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp202 & xmask, other=0.0)
tmp204 = tmp151 + tmp203
tmp205 = tl.full(tmp204.shape, 0.0, tmp204.dtype)
tmp206 = tl.where(tmp202, tmp204, tmp205)
tmp207 = tl.where(tmp159, tmp206, tmp151)
tmp208 = tl.full(tmp207.shape, 0.0, tmp207.dtype)
tmp209 = tl.where(tmp201, tmp207, tmp208)
tmp210 = tl.where(tmp5, tmp209, tmp151)
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp200, tmp210, tmp211)
tmp213 = tmp5 & tmp199
tmp214 = tmp159 & tmp213
tmp215 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp214 & xmask, other=0.0)
tmp216 = tmp151 + tmp215
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp214, tmp216, tmp217)
tmp219 = tl.where(tmp159, tmp218, tmp151)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp213, tmp219, tmp220)
tmp222 = tl.where(tmp5, tmp221, tmp151)
tmp223 = tl.where(tmp159, tmp212, tmp222)
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp199, tmp223, tmp224)
tmp226 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp200 & xmask, other=0.0)
tmp227 = tmp151 + tmp226
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp200, tmp227, tmp228)
tmp230 = tl.where(tmp159, tmp229, tmp151)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp199, tmp230, tmp231)
tmp233 = tl.where(tmp5, tmp232, tmp151)
tmp234 = tl.where(tmp5, tmp225, tmp233)
tmp235 = tl.where(tmp154, tmp198, tmp234)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp5, tmp235, tmp236)
tmp238 = tmp159 & tmp5
tmp239 = tmp5 & tmp238
tmp240 = tmp159 & tmp239
tmp241 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp240 & xmask, other=0.0)
tmp242 = tmp151 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp159, tmp244, tmp151)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp5, tmp247, tmp151)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp159, tmp250, tmp233)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp5, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp238 & xmask, other=0.0)
tmp255 = tmp151 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp159, tmp257, tmp151)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp5, tmp258, tmp259)
tmp261 = tl.where(tmp5, tmp260, tmp151)
tmp262 = tl.where(tmp5, tmp253, tmp261)
tmp263 = tl.where(tmp5, tmp237, tmp262)
tl.store(in_out_ptr0 + x2, tmp263, xmask)
@triton.jit
def triton_poi_fused_add_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp198 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 12 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp0 >= tmp6
tmp11 = tmp10 & tmp9
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp0 >= tmp4
tmp16 = tmp0 < tmp6
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_ptr0 + (192 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (192 + x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (192 + x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (192 + x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_ptr0 + (192 + x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_ptr0 + (192 + x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp9
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_ptr0 + (192 + x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_ptr0 + (192 + x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_ptr0 + (192 + x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_ptr0 + (192 + x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp9, tmp87, tmp88)
tmp90 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp9, tmp94, tmp95)
tmp97 = tmp17 & tmp9
tmp98 = tl.load(in_ptr0 + (192 + x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp9, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (192 + x2), tmp2 & xmask, other=0.0)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.load(in_ptr1 + (192 + x0 + 4 * x1), tmp2 & xmask, other=0.0)
tmp107 = tmp105 + tmp106
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp2, tmp107, tmp108)
tmp110 = tmp10 & tmp8
tmp111 = tmp8 & tmp110
tmp112 = tmp10 & tmp111
tmp113 = tmp8 & tmp112
tmp114 = tmp17 & tmp113
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp114 & xmask, other=0.0)
tmp116 = tl.load(in_ptr0 + (192 + x2), tmp113 & xmask, other=0.0)
tmp117 = tl.where(tmp17, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp113, tmp117, tmp118)
tmp120 = tl.load(in_ptr0 + (192 + x2), tmp112 & xmask, other=0.0)
tmp121 = tl.where(tmp8, tmp119, tmp120)
tmp122 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp112 & xmask, other=0.0)
tmp123 = tmp121 + tmp122
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp112, tmp123, tmp124)
tmp126 = tmp8 & tmp111
tmp127 = tmp17 & tmp126
tmp128 = tl.load(in_ptr0 + (192 + x2), tmp127 & xmask, other=0.0)
tmp129 = tl.load(in_ptr0 + (192 + x2), tmp126 & xmask, other=0.0)
tmp130 = tl.where(tmp17, tmp128, tmp129)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp126, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp111 & xmask, other=0.0)
tmp134 = tl.where(tmp8, tmp132, tmp133)
tmp135 = tl.where(tmp10, tmp125, tmp134)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp111, tmp135, tmp136)
tmp138 = tmp17 & tmp111
tmp139 = tl.load(in_ptr0 + (192 + x2), tmp138 & xmask, other=0.0)
tmp140 = tl.where(tmp17, tmp139, tmp133)
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp111, tmp140, tmp141)
tmp143 = tl.load(in_ptr0 + (192 + x2), tmp110 & xmask, other=0.0)
tmp144 = tl.where(tmp8, tmp142, tmp143)
tmp145 = tl.where(tmp8, tmp137, tmp144)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp110, tmp145, tmp146)
tmp148 = tmp8 & tmp8
tmp149 = tmp10 & tmp148
tmp150 = tmp8 & tmp149
tmp151 = tmp17 & tmp150
tmp152 = tl.load(in_ptr0 + (192 + x2), tmp151 & xmask, other=0.0)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp150 & xmask, other=0.0)
tmp154 = tl.where(tmp17, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp150, tmp154, tmp155)
tmp157 = tl.load(in_ptr0 + (192 + x2), tmp149 & xmask, other=0.0)
tmp158 = tl.where(tmp8, tmp156, tmp157)
tmp159 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp149 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp149, tmp160, tmp161)
tmp163 = tmp8 & tmp148
tmp164 = tmp17 & tmp163
tmp165 = tl.load(in_ptr0 + (192 + x2), tmp164 & xmask, other=0.0)
tmp166 = tl.load(in_ptr0 + (192 + x2), tmp163 & xmask, other=0.0)
tmp167 = tl.where(tmp17, tmp165, tmp166)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp163, tmp167, tmp168)
tmp170 = tl.load(in_ptr0 + (192 + x2), tmp148 & xmask, other=0.0)
tmp171 = tl.where(tmp8, tmp169, tmp170)
tmp172 = tl.where(tmp10, tmp162, tmp171)
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp148, tmp172, tmp173)
tmp175 = tmp17 & tmp148
tmp176 = tl.load(in_ptr0 + (192 + x2), tmp175 & xmask, other=0.0)
tmp177 = tl.where(tmp17, tmp176, tmp170)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp148, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp181 = tl.where(tmp8, tmp179, tmp180)
tmp182 = tl.where(tmp8, tmp174, tmp181)
tmp183 = tl.where(tmp10, tmp147, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp8, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp110 & xmask, other=0.0)
tmp187 = tmp144 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp110, tmp187, tmp188)
tmp190 = tl.where(tmp10, tmp189, tmp181)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp8, tmp190, tmp191)
tmp193 = tmp17 & tmp8
tmp194 = tl.load(in_ptr0 + (192 + x2), tmp193 & xmask, other=0.0)
tmp195 = tl.where(tmp17, tmp194, tmp180)
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp8, tmp195, tmp196)
tmp199 = tl.where(tmp8, tmp197, tmp198)
tmp200 = tl.where(tmp8, tmp192, tmp199)
tmp201 = tl.where(tmp8, tmp185, tmp200)
tmp202 = tl.where(tmp2, tmp109, tmp201)
tmp203 = tmp3 >= tmp6
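    # no-op below: a bare boolean expression whose assignment (presumably
    # tmp204, which is skipped in the numbering) was stripped as dead code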
tmp203 & tmp2
tmp205 = tl.where(tmp203, tmp202, tmp105)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp2, tmp205, tmp206)
tmp208 = tl.where(tmp203, tmp202, tmp201)
tmp209 = tl.where(tmp2, tmp207, tmp208)
tl.store(out_ptr0 + x2, tmp202, xmask)
tl.store(out_ptr1 + x2, tmp209, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp102 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp0 >= tmp5
tmp7 = tmp0 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp10 & tmp8
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp9 >= tmp5
tmp16 = tmp9 < tmp1
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_out_ptr0 + x2, tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_out_ptr0 + x2, tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_out_ptr0 + x2, tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_out_ptr0 + x2, tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_out_ptr0 + x2, tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_out_ptr0 + x2, tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp8
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_out_ptr0 + x2, tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_out_ptr0 + x2, tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_out_ptr0 + x2, tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_out_ptr0 + x2, tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_out_ptr0 + x2, tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + x2, tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_out_ptr0 + x2, tmp8 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp8, tmp87, tmp88)
tmp90 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp8, tmp94, tmp95)
tmp97 = tmp17 & tmp8
tmp98 = tl.load(in_out_ptr0 + x2, tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp8, tmp99, tmp100)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.where(tmp2, tmp4, tmp105)
tmp107 = tl.where(tmp2, tmp3, tmp106)
tl.store(in_out_ptr0 + x2, tmp107, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp259 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 12 + x1
tmp7 = tmp6 >= tmp3
tmp8 = tmp7 & tmp5
tmp9 = tmp5 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 >= tmp11
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp7 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (192 + x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_ptr0 + (192 + x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (192 + x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp7 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_ptr0 + (192 + x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp7, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp7, tmp53, tmp54)
tmp56 = tl.where(tmp7, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (216 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp7 & tmp8
tmp62 = tmp14 & tmp61
tmp63 = tmp7 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_ptr0 + (192 + x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp7, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp7 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_ptr0 + (192 + x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_ptr0 + (192 + x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp7, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp102 = tl.where(tmp7, tmp100, tmp101)
tmp103 = tl.where(tmp7, tmp93, tmp102)
tmp104 = tl.where(tmp5, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp8, tmp104, tmp105)
tmp107 = tmp14 & tmp8
tmp108 = tmp7 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_ptr0 + (192 + x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_ptr0 + (192 + x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp7, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp8, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp8, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp5 & xmask, other=0.0)
tmp134 = tl.where(tmp7, tmp132, tmp133)
tmp135 = tl.where(tmp7, tmp125, tmp134)
tmp136 = tl.where(tmp7, tmp106, tmp135)
tmp137 = tl.full(tmp136.shape, 0.0, tmp136.dtype)
tmp138 = tl.where(tmp5, tmp136, tmp137)
tmp139 = tmp5 & tmp7
tmp140 = tmp7 & tmp139
tmp141 = tmp14 & tmp140
tmp142 = tmp7 & tmp141
tmp143 = tmp14 & tmp142
tmp144 = tl.load(in_ptr0 + (192 + x2), tmp143 & xmask, other=0.0)
tmp145 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp143 & xmask, other=0.0)
tmp146 = tmp144 + tmp145
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp143, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (192 + x2), tmp142 & xmask, other=0.0)
tmp150 = tl.where(tmp14, tmp148, tmp149)
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp142, tmp150, tmp151)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp141 & xmask, other=0.0)
tmp154 = tl.where(tmp7, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp141, tmp154, tmp155)
tmp157 = tmp7 & tmp140
tmp158 = tmp14 & tmp157
tmp159 = tl.load(in_ptr0 + (192 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (192 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp14, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (192 + x2), tmp140 & xmask, other=0.0)
tmp169 = tl.where(tmp7, tmp167, tmp168)
tmp170 = tl.where(tmp14, tmp156, tmp169)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp140, tmp170, tmp171)
tmp173 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp141 & xmask, other=0.0)
tmp174 = tmp153 + tmp173
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp141, tmp174, tmp175)
tmp177 = tl.where(tmp14, tmp176, tmp168)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp140, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp139 & xmask, other=0.0)
tmp181 = tl.where(tmp7, tmp179, tmp180)
tmp182 = tl.where(tmp7, tmp172, tmp181)
tmp183 = tl.load(in_ptr1 + (216 + x0 + 4 * x1), tmp139 & xmask, other=0.0)
tmp184 = tmp182 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp139, tmp184, tmp185)
tmp187 = tmp7 & tmp7
tmp188 = tmp14 & tmp187
tmp189 = tmp7 & tmp188
tmp190 = tmp14 & tmp189
tmp191 = tl.load(in_ptr0 + (192 + x2), tmp190 & xmask, other=0.0)
tmp192 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp190 & xmask, other=0.0)
tmp193 = tmp191 + tmp192
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp190, tmp193, tmp194)
tmp196 = tl.load(in_ptr0 + (192 + x2), tmp189 & xmask, other=0.0)
tmp197 = tl.where(tmp14, tmp195, tmp196)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp189, tmp197, tmp198)
tmp200 = tl.load(in_ptr0 + (192 + x2), tmp188 & xmask, other=0.0)
tmp201 = tl.where(tmp7, tmp199, tmp200)
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp188, tmp201, tmp202)
tmp204 = tmp7 & tmp187
tmp205 = tmp14 & tmp204
tmp206 = tl.load(in_ptr0 + (192 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (192 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp14, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (192 + x2), tmp187 & xmask, other=0.0)
tmp216 = tl.where(tmp7, tmp214, tmp215)
tmp217 = tl.where(tmp14, tmp203, tmp216)
tmp218 = tl.full(tmp217.shape, 0.0, tmp217.dtype)
tmp219 = tl.where(tmp187, tmp217, tmp218)
tmp220 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp188 & xmask, other=0.0)
tmp221 = tmp200 + tmp220
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp188, tmp221, tmp222)
tmp224 = tl.where(tmp14, tmp223, tmp215)
tmp225 = tl.full(tmp224.shape, 0.0, tmp224.dtype)
tmp226 = tl.where(tmp187, tmp224, tmp225)
tmp227 = tl.load(in_ptr0 + (192 + x2), tmp7 & xmask, other=0.0)
tmp228 = tl.where(tmp7, tmp226, tmp227)
tmp229 = tl.where(tmp7, tmp219, tmp228)
tmp230 = tl.where(tmp5, tmp186, tmp229)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp7, tmp230, tmp231)
tmp233 = tmp14 & tmp7
tmp234 = tmp7 & tmp233
tmp235 = tmp14 & tmp234
tmp236 = tl.load(in_ptr0 + (192 + x2), tmp235 & xmask, other=0.0)
tmp237 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp235 & xmask, other=0.0)
tmp238 = tmp236 + tmp237
tmp239 = tl.full(tmp238.shape, 0.0, tmp238.dtype)
tmp240 = tl.where(tmp235, tmp238, tmp239)
tmp241 = tl.load(in_ptr0 + (192 + x2), tmp234 & xmask, other=0.0)
tmp242 = tl.where(tmp14, tmp240, tmp241)
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp234, tmp242, tmp243)
tmp245 = tl.load(in_ptr0 + (192 + x2), tmp233 & xmask, other=0.0)
tmp246 = tl.where(tmp7, tmp244, tmp245)
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp233, tmp246, tmp247)
tmp249 = tl.where(tmp14, tmp248, tmp228)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp7, tmp249, tmp250)
tmp252 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp233 & xmask, other=0.0)
tmp253 = tmp245 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp233, tmp253, tmp254)
tmp256 = tl.where(tmp14, tmp255, tmp227)
tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype)
tmp258 = tl.where(tmp7, tmp256, tmp257)
tmp260 = tl.where(tmp7, tmp258, tmp259)
tmp261 = tl.where(tmp7, tmp251, tmp260)
tmp262 = tl.where(tmp7, tmp232, tmp261)
tmp263 = tl.where(tmp5, tmp138, tmp262)
tl.store(out_ptr0 + x2, tmp263, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp133 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp4 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp4 >= tmp11
tmp13 = tmp4 < tmp5
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp2 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + x2, tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_out_ptr0 + x2, tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp2, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp2 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_out_ptr0 + x2, tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_out_ptr0 + x2, tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_out_ptr0 + x2, tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp2, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_out_ptr0 + x2, tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp2, tmp53, tmp54)
tmp56 = tl.where(tmp2, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (168 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp2 & tmp2
tmp62 = tmp14 & tmp61
tmp63 = tmp2 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_out_ptr0 + x2, tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_out_ptr0 + x2, tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + x2, tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp2, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp2 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_out_ptr0 + x2, tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_out_ptr0 + x2, tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp2, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_out_ptr0 + x2, tmp2 & xmask, other=0.0)
tmp102 = tl.where(tmp2, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp93, tmp102)
tmp104 = tl.where(tmp8, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp2, tmp104, tmp105)
tmp107 = tmp14 & tmp2
tmp108 = tmp2 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_out_ptr0 + x2, tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_out_ptr0 + x2, tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_out_ptr0 + x2, tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp2, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp2, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp2, tmp130, tmp131)
tmp134 = tl.where(tmp2, tmp132, tmp133)
tmp135 = tl.where(tmp2, tmp125, tmp134)
tmp136 = tl.where(tmp2, tmp106, tmp135)
tmp137 = tl.where(tmp2, tmp3, tmp136)
tmp138 = tmp4 >= tmp1
tmp139 = tmp138 & tmp2
tmp140 = tmp2 & tmp139
tmp141 = tmp138 & tmp140
tmp142 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp141 & xmask, other=0.0)
tmp143 = tmp137 + tmp142
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp141, tmp143, tmp144)
tmp146 = tl.where(tmp138, tmp145, tmp137)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp140, tmp146, tmp147)
tmp149 = tl.where(tmp2, tmp148, tmp137)
tmp150 = tl.full(tmp149.shape, 0.0, tmp149.dtype)
tmp151 = tl.where(tmp139, tmp149, tmp150)
tmp152 = tmp138 & tmp61
tmp153 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp152 & xmask, other=0.0)
tmp154 = tmp137 + tmp153
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp152, tmp154, tmp155)
tmp157 = tl.where(tmp138, tmp156, tmp137)
tmp158 = tl.full(tmp157.shape, 0.0, tmp157.dtype)
tmp159 = tl.where(tmp61, tmp157, tmp158)
tmp160 = tl.where(tmp2, tmp159, tmp137)
tmp161 = tl.where(tmp138, tmp151, tmp160)
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp2, tmp161, tmp162)
tmp164 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp139 & xmask, other=0.0)
tmp165 = tmp137 + tmp164
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp139, tmp165, tmp166)
tmp168 = tl.where(tmp138, tmp167, tmp137)
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp2, tmp168, tmp169)
tmp171 = tl.where(tmp2, tmp170, tmp137)
tmp172 = tl.where(tmp2, tmp163, tmp171)
tl.store(in_out_ptr0 + x2, tmp172, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
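        # Note the buffer recycling below: Inductor's memory planner reuses
        # intermediates (buf4 = buf1, buf6 = buf3, buf12 = buf9, ...) so the
        # whole kernel pipeline runs in a few (4, 16) and (16, 16) scratch
        # buffers instead of allocating one per step.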
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_1[grid(64)](buf0, arg0_1, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
buf3 = buf2
del buf2
triton_poi_fused_add_zeros_2[grid(256)](buf3, buf1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_add_3[grid(64)](buf3, arg0_1, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf6 = buf3
del buf3
triton_poi_fused_add_4[grid(256)](buf6, buf5, buf4, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_5[grid(64)](buf6, arg0_1, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
buf9 = buf8
del buf8
triton_poi_fused_add_6[grid(256)](buf9, buf7, arg0_1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf10 = buf7
del buf7
buf11 = buf4
del buf4
triton_poi_fused_add_7[grid(64)](buf9, arg0_1, buf10, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused_add_8[grid(256)](buf12, buf11, buf10, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf10
buf13 = buf11
del buf11
triton_poi_fused_9[grid(64)](buf12, arg0_1, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = buf12
del buf12
buf15 = buf14
del buf14
triton_poi_fused_add_10[grid(256)](buf15, buf13, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf13
return buf15,
class Truncation2DNew(torch.nn.Module):
"""
    A module merging the last two dimensions: the coarse-scale grid in
    dimensions -4, -3 and the finer-resolution grid in dimensions -2, -1
    are combined into one fine-grained grid with two fewer dimensions.
"""
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
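
# Usage sketch (illustrative, not part of the generated module). `call`
# launches Triton kernels, so a CUDA device is assumed, and the input shape
# (4, 4, 4, 4) is the one asserted by `call`: coarse grid in dims -4, -3,
# fine grid in dims -2, -1.
if __name__ == '__main__':
    model = Truncation2DNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = model(x)
    print(y.shape)  # expected: torch.Size([16, 16]), the merged fine grid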
| kpoeppel/pytorch_probgraph | Truncation2D | false | 15,972 | ["BSD-3-Clause"] | 47 | b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0 | https://github.com/kpoeppel/pytorch_probgraph/tree/b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0 |
PAM_Module | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
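# The kernel above is the numerically stable softmax over the last dimension:
#   softmax(x)_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j).
# Subtracting the row maximum before exponentiating avoids overflow without
# changing the result; an eager-mode reference is torch.softmax(energy, dim=-1).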
# kernel path: runs/run_shard_0/inductor_cache/yl/cyl57twtgf3lzd5sst7snomgtzysir6mpvrzx6jm7k4lxpcq6sru.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_2 => convolution_3
# out_3 => add
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
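# The kernel above fuses the final 1x1 convolution's per-channel bias add
# (in_ptr0, indexed by x1 = channel) with the residual connection
# out = conv(out) + x (in_ptr1), doing both updates in a single memory pass.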
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 1, 4, 4), (16, 16, 4, 1))
buf9 = reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf9, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out=buf10)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 1, 4, 4), (16, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf12, primals_9, primals_1, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8, buf7, reinterpret_tensor(buf10, (4, 1, 4, 4), (16, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class PAM_Module(nn.Module):
""" Position attention module"""
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.W = nn.Conv2d(in_channels=in_dim // 4, out_channels=in_dim,
kernel_size=1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, _C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
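        # energy[b, i, j] = <query_i, key_j>: pairwise similarity between all
        # H*W positions; softmax over j makes each row a distribution over
        # the positions that position i attends to.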
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, -1, height, width)
out = self.W(out)
out = out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
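
# Usage sketch (illustrative; assumes the common harness convention where
# get_init_inputs() feeds the constructor and get_inputs() feeds forward()):
if __name__ == '__main__':
    args, kwargs = get_init_inputs()
    module = PAM_Module(*args, **kwargs)
    out = module(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 4]) -- the residual keeps x's shape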
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
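    # no-op below: the `rmask = ...` assignment from the original kernel was
    # stripped as unused (rnumel == RBLOCK, so the mask is all True)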
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf1
triton_poi_fused_convolution_0[grid(64)](buf3, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 1, 4, 4), (16, 16, 4, 1))
buf9 = reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf8
triton_poi_fused_convolution_0[grid(64)](buf9, primals_7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (4, 1, 16), (16, 0, 1),
0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out
=buf10)
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 1,
4, 4), (16, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_add_convolution_2[grid(256)](buf12, primals_9,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8,
buf7, reinterpret_tensor(buf10, (4, 1, 4, 4), (16, 16, 4, 1), 0),
reinterpret_tensor(buf9, (4, 16, 1), (16, 1, 16), 0),
reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0),
reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0))
class PAM_ModuleNew(nn.Module):
""" Position attention module"""
def __init__(self, in_dim):
super(PAM_ModuleNew, self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
4, kernel_size=1)
self.W = nn.Conv2d(in_channels=in_dim // 4, out_channels=in_dim,
kernel_size=1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_2 = self.query_conv.weight
primals_3 = self.query_conv.bias
primals_4 = self.key_conv.weight
primals_5 = self.key_conv.bias
primals_6 = self.value_conv.weight
primals_7 = self.value_conv.bias
primals_8 = self.W.weight
primals_9 = self.W.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
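# Hedged self-check sketch (added; not part of the generated file): recompute
# the eager forward from the wrapper's own weights and compare it against the
# Triton-backed path. Guarded so it only runs as a script on a CUDA machine;
# the atol is an assumption about acceptable fp32 drift.
if __name__ == '__main__' and torch.cuda.is_available():
    m = PAM_ModuleNew(4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    b, _, h, w = x.shape
    q = m.query_conv(x).view(b, -1, h * w).permute(0, 2, 1)
    k = m.key_conv(x).view(b, -1, h * w)
    attn = m.softmax(torch.bmm(q, k))
    v = m.value_conv(x).view(b, -1, h * w)
    ref = m.W(torch.bmm(v, attn.permute(0, 2, 1)).view(b, -1, h, w)) + x
    assert torch.allclose(m(x), ref, atol=1e-05)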
 | lzrobots/dgmn | PAM_Module | false | 15,973 | ["MIT"] | 54 | 515476b5c6a07dcc3b7a4d2243c541377624bb33 | https://github.com/lzrobots/dgmn/tree/515476b5c6a07dcc3b7a4d2243c541377624bb33 |
resnet_block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vz/cvzohqculyvsf5iw7mqoyne5q3vwkkmjlyeio42e3ns6g5p45plj.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# output_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_3, 0), kwargs = {})
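# Descriptive note (added): in plain terms, this kernel adds the conv bias,
# applies leaky_relu with negative_slope=0.01 in place, and stores the
# boolean (activation > 0) mask that leaky_relu_backward will consume.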
triton_poi_fused_leaky_relu_leaky_relu_backward_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/76/c76rwxvn5t4pfgsttygailmz4eydg5bnjmv2rwl3szsnm33tgova.py
# Topologically Sorted Source Nodes: [output_3, output_4], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# output_3 => add
# output_4 => gt_1, mul_1, where_1
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_4, %primals_3), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add, %mul_1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {})
triton_poi_fused_add_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = tmp9 > tmp5
tl.store(in_out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_3, output_4], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_add_leaky_relu_leaky_relu_backward_1.run(buf3, primals_5, primals_3, buf4, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class resnet_block(nn.Module):
def __init__(self, ef_dim):
super(resnet_block, self).__init__()
self.ef_dim = ef_dim
self.conv_1 = nn.Conv3d(self.ef_dim, self.ef_dim, 1, stride=1,
padding=0, bias=True)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim, 1, stride=1,
padding=0, bias=True)
def forward(self, input):
output = self.conv_1(input)
output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
output = self.conv_2(output)
output = output + input
output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ef_dim': 4}]
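# Hedged usage sketch (added, not from the repo): on PyTorch versions that
# accept unbatched conv inputs (1.11+), the 1x1 Conv3d layers treat the 4-D
# sample from get_inputs() as (C, D, H, W), so the residual block preserves
# the [4, 4, 4, 4] shape.
if __name__ == '__main__':
    block = resnet_block(ef_dim=4)
    out = block(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4)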
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_leaky_relu_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = tmp9 > tmp5
tl.store(in_out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4,
4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_leaky_relu_backward_1[grid(256)](buf3,
primals_5, primals_3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), buf4, buf5
class resnet_blockNew(nn.Module):
def __init__(self, ef_dim):
super(resnet_blockNew, self).__init__()
self.ef_dim = ef_dim
self.conv_1 = nn.Conv3d(self.ef_dim, self.ef_dim, 1, stride=1,
padding=0, bias=True)
self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim, 1, stride=1,
padding=0, bias=True)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
 | lwkobe/NMC | resnet_block | false | 15,974 | ["MIT"] | 74 | a59c187d35b2f929ea3a94fc2b434061d7f7993a | https://github.com/lwkobe/NMC/tree/a59c187d35b2f929ea3a94fc2b434061d7f7993a |
StableLayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oe/coebxigj7j6ue72ms24jkj6buvn6h6erxmsynorkgerga43qetvm.py
# Topologically Sorted Source Nodes: [amax, x], Original ATen: [aten.amax, aten.div]
# Source node to ATen node mapping:
# amax => amax
# x => div
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %amax), kwargs = {})
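# Descriptive note (added): each program reloads the four elements of its
# length-4 row, folds them into a running maximum, and divides the current
# element by that row max; this is the x / x.amax(dim=-1, keepdim=True) step
# of the module.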
triton_poi_fused_amax_div_0 = async_compile.triton('triton_poi_fused_amax_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_amax_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6n/c6nwltytpo33ssumvxlcryrpvlql2hsjrmxl624j4dkkjxt5qgkm.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%div, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
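# Descriptive note (added): each program computes the row mean and
# rsqrt(var + 1e-05) over the four elements of its row, matching the
# statistics nn.LayerNorm uses (variance with correction=0).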
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/47/c47tzz4cth6lfbu5kqpncq6q6qn3bgfukypr5564qp5el2f2h5lx.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%div, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [amax, x], Original ATen: [aten.amax, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_amax_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf3, buf1, buf2, primals_2, primals_3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_2
del primals_3
return (buf3, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class StableLayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = x / x.amax(dim=-1, keepdim=True).detach()
return self.norm(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
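# Hedged property check (added for illustration): dividing by the detached
# row-wise amax makes the forward pass invariant to positive rescaling of the
# input, so norm(x) and norm(1000 * x) agree up to floating-point error.
if __name__ == '__main__':
    norm = StableLayerNorm(4)
    x = torch.rand([4, 4, 4, 4])
    assert torch.allclose(norm(x), norm(1000 * x), atol=1e-06)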
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_amax_div_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused_native_layer_norm_2[grid(256)](buf3, buf1, buf2,
primals_2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_2
del primals_3
return buf3, primals_1
class StableLayerNormNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.norm = nn.LayerNorm(dim)
def forward(self, input_0):
primals_2 = self.norm.weight
primals_3 = self.norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | lucidrains/nuwa-pytorch | StableLayerNorm | false | 15,975 | ["MIT"] | 310 | bf1f3dc1126ba0a24a280bd7412a8082e5013b46 | https://github.com/lucidrains/nuwa-pytorch/tree/bf1f3dc1126ba0a24a280bd7412a8082e5013b46 |
DDM_Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/a7/ca7z2ryh26grcwmvwvtxkjp6epy7ogfqxwbxg6r3q7tqzhxlcfw2.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
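# Descriptive note (added): the fused kernel evaluates ELU with alpha = 1.0
# elementwise: elu(x) = x if x > 0 else expm1(x).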
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t3/ct3pjb6mtfdnep3btrkdjqoc66xio4cxbi6rd2bipbfdxst7priw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x_1 => expm1_1, gt_1, mul_3, mul_5, where_1
# Graph fragment:
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
triton_poi_fused_elu_1 = async_compile.triton('triton_poi_fused_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (288, 4), (4, 1))
assert_size_stride(primals_2, (288, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 288), (288, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 288), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 18432, grid=grid(18432), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 288), (288, 1), 0), reinterpret_tensor(primals_4, (288, 4), (1, 288), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu]
triton_poi_fused_elu_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 288), (288, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((288, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((288, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 288), (288, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_Decoder(torch.nn.Module):
def __init__(self, obs_space, dim):
super(DDM_Decoder, self).__init__()
self.fc = nn.Linear(dim, 32 * 3 * 3)
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.fc(inputs))
x = F.elu(self.linear1(x))
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_space': 4, 'dim': 4}]
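# Hedged usage sketch (added, not from the repo): the decoder widens the last
# dimension dim -> 288 (32 * 3 * 3) -> dim -> obs_space, so with obs_space ==
# dim == 4 the output shape equals the input shape.
if __name__ == '__main__':
    dec = DDM_Decoder(obs_space=4, dim=4)
    out = dec(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4)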
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (288, 4), (4, 1))
assert_size_stride(primals_2, (288,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 288), (288, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 288), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(18432)](buf0, buf1, 18432, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 288),
(288, 1), 0), reinterpret_tensor(primals_4, (288, 4), (1, 288),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 288), (288, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_6, primals_4
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_DecoderNew(torch.nn.Module):
def __init__(self, obs_space, dim):
super(DDM_DecoderNew, self).__init__()
self.fc = nn.Linear(dim, 32 * 3 * 3)
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
 | lysuk96/rl_representations | DDM_Decoder | false | 15,976 | ["MIT"] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
FinalTanh | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# z_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
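# Descriptive note (added): the kernel fuses the bias add with
# relu(x) = max(x, 0) and records the (relu(x) <= 0) mask that
# threshold_backward consumes in the backward graph.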
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ot/cotd7jkaehusj5owdg3vudotf5av32ehzqpj4x4vuxj6vddzz67e.py
# Topologically Sorted Source Nodes: [z_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# z_3 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_4,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [z_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf3, primals_5, 1024, grid=grid(1024), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class FinalTanh(torch.nn.Module):
def __init__(self, input_channels, hidden_channels,
hidden_hidden_channels, num_hidden_layers):
super(FinalTanh, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels,
hidden_hidden_channels)
self.linears = torch.nn.ModuleList(torch.nn.Linear(
hidden_hidden_channels, hidden_hidden_channels) for _ in range(
num_hidden_layers - 1))
self.linear_out = torch.nn.Linear(hidden_hidden_channels,
input_channels * hidden_channels)
def extra_repr(self):
return (
'input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}'
            .format(self.input_channels, self.hidden_channels,
            self.hidden_hidden_channels, self.num_hidden_layers))
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
for linear in self.linears:
z = linear(z)
z = z.relu()
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels,
self.input_channels)
z = z.tanh()
return z
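# --- Editor's note: illustrative sketch, not part of the original module ---
# FinalTanh maps (*batch, hidden_channels) to (*batch, hidden_channels,
# input_channels): linear_out emits input_channels * hidden_channels
# features, which .view() unflattens before the final tanh. A minimal,
# hypothetical usage (shapes taken from get_init_inputs below):
def _final_tanh_example():
    f = FinalTanh(input_channels=4, hidden_channels=4,
        hidden_hidden_channels=4, num_hidden_layers=1)
    z = torch.rand(4, 4, 4, 4)  # trailing dim = hidden_channels
    out = f(z)
    assert out.shape == (4, 4, 4, 4, 4)  # (*batch, hidden, input)
    return out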
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4,
'hidden_hidden_channels': 4, 'num_hidden_layers': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0)
del buf2
        triton_poi_fused_tanh_1[grid(1024)](buf3, primals_5, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
    return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4)
class FinalTanhNew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels,
hidden_hidden_channels, num_hidden_layers):
super(FinalTanhNew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels,
hidden_hidden_channels)
self.linears = torch.nn.ModuleList(torch.nn.Linear(
hidden_hidden_channels, hidden_hidden_channels) for _ in range(
num_hidden_layers - 1))
self.linear_out = torch.nn.Linear(hidden_hidden_channels,
input_channels * hidden_channels)
def extra_repr(self):
return (
'input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}'
            .format(self.input_channels, self.hidden_channels,
            self.hidden_hidden_channels, self.num_hidden_layers))
def forward(self, input_0):
primals_1 = self.linear_in.weight
primals_2 = self.linear_in.bias
primals_4 = self.linear_out.weight
primals_5 = self.linear_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| lysuk96/rl_representations | FinalTanh | false | 15,977 | [
"MIT"
] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
MixLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yn/cynlflanva6jb6ue2wnlf4grebetypve3egklc3mz5lpfrc2nb3s.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# loss => add
# mul => mul_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0.0), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mul_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 * tmp1
tmp19 = tmp18 + tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
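# Editor's note on the fused kernel above: it folds BCEWithLogitsLoss into a
# single reduction using the numerically stable identity (x = logit,
# y = target):
#     (1 - y) * x - (min(0, x) - log1p(exp(-|x|)))
#   = max(x, 0) - x * y + log(1 + exp(-|x|))
#   = -[y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))]
# so no large positive value is ever exponentiated. The mean over the 256
# elements, the bce_w multiplier (1.0), and the initial loss (0.0) are fused
# into the same kernel (tmp16..tmp19).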
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import filterfalse
def flatten_binary_scores(scores, labels, ignore=None):
"""
    Flattens predictions in the batch (binary case) and
    removes labels equal to 'ignore'.
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
    Computes the gradient of the Lovasz extension w.r.t. sorted errors.
    See Alg. 1 in the paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
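# --- Editor's note: illustrative sketch, not part of the original module ---
# Worked example of lovasz_grad on a tiny sorted label vector. With
# gt_sorted = [1, 1, 0, 1]: gts = 3,
#   intersection = 3 - cumsum([1, 1, 0, 1]) = [2, 1, 1, 0]
#   union        = 3 + cumsum([0, 0, 1, 0]) = [3, 3, 4, 4]
#   jaccard      = 1 - intersection / union = [1/3, 2/3, 3/4, 1]
# and differencing gives the per-position gradient [1/3, 1/3, 1/12, 1/4],
# whose entries sum back to the final Jaccard value of 1.
def _lovasz_grad_example():
    g = lovasz_grad(torch.tensor([1.0, 1.0, 0.0, 1.0]))
    assert torch.allclose(g.sum(), torch.tensor(1.0))
    return g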
def isnan(x):
return x != x
def mean(l, ignore_nan=True, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = filterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
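# --- Editor's note: illustrative sketch, not part of the original module ---
# `mean` is a generator-friendly nanmean: NaNs are filtered out before
# averaging, and `empty` is returned when nothing survives the filter.
def _mean_example():
    assert mean([1.0, float('nan'), 3.0]) == 2.0  # NaN is ignored
    assert mean([], empty=0) == 0                 # empty input -> `empty`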
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-07):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, output, target):
output = torch.sigmoid(output)
if torch.sum(target) == 0:
output = 1.0 - output
target = 1.0 - target
return 1.0 - (2 * torch.sum(output * target) + self.smooth) / (
torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
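# --- Editor's note: illustrative sketch, not part of the original module ---
# DiceLoss works on raw logits (sigmoid is applied inside) and flips both
# maps when the target is all-background, so a confidently all-negative
# prediction still scores well. A hypothetical sanity check:
def _dice_example():
    crit = DiceLoss()
    logits = torch.full((2, 1, 4, 4), -10.0)  # confident background
    target = torch.zeros((2, 1, 4, 4))
    loss = crit(logits, target)               # near 0 thanks to the flip
    return loss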
class FocalLoss(nn.Module):
def __init__(self, gamma=2, eps=1e-07):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
def forward(self, logit, target):
prob = torch.sigmoid(logit)
prob = prob.clamp(self.eps, 1.0 - self.eps)
loss = -1 * target * torch.log(prob)
loss = loss * (1 - logit) ** self.gamma
return loss.sum()
class LovaszHinge(nn.Module):
def __init__(self, activation=lambda x: F.elu(x, inplace=True) + 1.0,
per_image=True, ignore=None):
super(LovaszHinge, self).__init__()
self.activation = activation
self.per_image = per_image
self.ignore = ignore
def lovasz_hinge_flat(self, logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
        (labels equal to `self.ignore` are filtered out before this call)
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * signs
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(self.activation(errors_sorted), grad)
return loss
def forward(self, logits, labels):
if self.per_image:
            loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), self.ignore))
                for log, lab in zip(logits, labels))
else:
loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits,
labels, self.ignore))
return loss
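# --- Editor's note: illustrative sketch, not part of the original module ---
# With per_image=True (the default) the hinge is computed sample by sample
# and averaged via `mean`; per_image=False flattens the whole batch first.
# A minimal, hypothetical call:
def _lovasz_hinge_example():
    crit = LovaszHinge(per_image=False)
    logits = torch.randn(8)                  # raw scores in (-inf, +inf)
    labels = (torch.rand(8) > 0.5).float()   # binary ground truth
    return crit(logits, labels)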
class MixLoss(nn.Module):
def __init__(self, bce_w=1.0, dice_w=0.0, focal_w=0.0, lovasz_w=0.0,
bce_kwargs={}, dice_kwargs={}, focal_kwargs={}, lovasz_kwargs={}):
super(MixLoss, self).__init__()
self.bce_w = bce_w
self.dice_w = dice_w
self.focal_w = focal_w
self.lovasz_w = lovasz_w
self.bce_loss = nn.BCEWithLogitsLoss(**bce_kwargs)
self.dice_loss = DiceLoss(**dice_kwargs)
self.focal_loss = FocalLoss(**focal_kwargs)
self.lovasz_loss = LovaszHinge(**lovasz_kwargs)
def forward(self, output, target):
loss = 0.0
if self.bce_w:
loss += self.bce_w * self.bce_loss(output, target)
if self.dice_w:
loss += self.dice_w * self.dice_loss(output, target)
if self.focal_w:
loss += self.focal_w * self.focal_loss(output, target)
if self.lovasz_w:
loss += self.lovasz_w * self.lovasz_loss(output, target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
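# --- Editor's note: illustrative sketch, not part of the original module ---
# MixLoss is a weighted sum; with the default weights only the BCE term is
# active, which matches the single fused kernel in the compiled output.
# A hypothetical blend of BCE and Dice:
def _mix_loss_example():
    crit = MixLoss(bce_w=1.0, dice_w=0.5)
    out = torch.randn(4, 4, 4, 4)
    tgt = torch.rand(4, 4, 4, 4)
    return crit(out, tgt)  # 1.0 * bce + 0.5 * dice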
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from itertools import filterfalse
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 * tmp1
tmp19 = tmp18 + tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mul_0[grid(1)](
buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def flatten_binary_scores(scores, labels, ignore=None):
"""
    Flattens predictions in the batch (binary case) and
    removes labels equal to 'ignore'.
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
    Computes the gradient of the Lovasz extension w.r.t. sorted errors.
    See Alg. 1 in the paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def isnan(x):
return x != x
def mean(l, ignore_nan=True, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = filterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-07):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, output, target):
output = torch.sigmoid(output)
if torch.sum(target) == 0:
output = 1.0 - output
target = 1.0 - target
return 1.0 - (2 * torch.sum(output * target) + self.smooth) / (
torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
class FocalLoss(nn.Module):
def __init__(self, gamma=2, eps=1e-07):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
def forward(self, logit, target):
prob = torch.sigmoid(logit)
prob = prob.clamp(self.eps, 1.0 - self.eps)
loss = -1 * target * torch.log(prob)
loss = loss * (1 - logit) ** self.gamma
return loss.sum()
class LovaszHinge(nn.Module):
def __init__(self, activation=lambda x: F.elu(x, inplace=True) + 1.0,
per_image=True, ignore=None):
super(LovaszHinge, self).__init__()
self.activation = activation
self.per_image = per_image
self.ignore = ignore
def lovasz_hinge_flat(self, logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
        (labels equal to `self.ignore` are filtered out before this call)
"""
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * signs
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(self.activation(errors_sorted), grad)
return loss
def forward(self, logits, labels):
if self.per_image:
            loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), self.ignore))
                for log, lab in zip(logits, labels))
else:
loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits,
labels, self.ignore))
return loss
class MixLossNew(nn.Module):
def __init__(self, bce_w=1.0, dice_w=0.0, focal_w=0.0, lovasz_w=0.0,
bce_kwargs={}, dice_kwargs={}, focal_kwargs={}, lovasz_kwargs={}):
super(MixLossNew, self).__init__()
self.bce_w = bce_w
self.dice_w = dice_w
self.focal_w = focal_w
self.lovasz_w = lovasz_w
self.bce_loss = nn.BCEWithLogitsLoss(**bce_kwargs)
self.dice_loss = DiceLoss(**dice_kwargs)
self.focal_loss = FocalLoss(**focal_kwargs)
self.lovasz_loss = LovaszHinge(**lovasz_kwargs)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lyakaap/pytorch-template | MixLoss | false | 15,978 | [
"MIT"
] | 140 | eff9f0a4dd50fa49c3b949065247598d5eabc91e | https://github.com/lyakaap/pytorch-template/tree/eff9f0a4dd50fa49c3b949065247598d5eabc91e |
ResidualBlockNoBN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lm/clmneuks6sx5dgl2umedoyuqhtcgjyvqbcszrrcdyj3vdy5verfg.py
# Topologically Sorted Source Nodes: [out, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# out => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
triton_poi_fused_add_convolution_mul_1 = async_compile.triton('triton_poi_fused_add_convolution_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_out_ptr0 + (x3), None)
tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 1048576, grid=grid(1048576), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_1.run(buf3, primals_1, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
return (buf3, primals_1, primals_2, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
@torch.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill)
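# --- Editor's note: illustrative sketch, not part of the original module ---
# default_init_weights applies Kaiming init and then damps the weights,
# which is why ResidualBlockNoBN passes scale=0.1 for its two convs.
# A hypothetical standalone call:
def _init_example():
    conv = nn.Conv2d(8, 8, 3, padding=1)
    default_init_weights(conv, scale=0.1, bias_fill=0.0)
    return conv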
class ResidualBlockNoBN(nn.Module):
"""Residual block without BN.
It has a style of:
---Conv-ReLU-Conv-+-
|________________|
Args:
num_feat (int): Channel number of intermediate features.
Default: 64.
res_scale (float): Residual scale. Default: 1.
pytorch_init (bool): If set to True, use pytorch default init,
otherwise, use default_init_weights. Default: False.
"""
def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
super(ResidualBlockNoBN, self).__init__()
self.res_scale = res_scale
self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
if not pytorch_init:
default_init_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out * self.res_scale
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
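# --- Editor's note: illustrative sketch, not part of the original module ---
# The block is shape-preserving: two 3x3 convs with padding 1 keep
# (N, num_feat, H, W), and the identity shortcut adds res_scale * out.
def _residual_block_example():
    block = ResidualBlockNoBN(num_feat=64, res_scale=0.2)
    x = torch.rand(2, 64, 16, 16)
    y = block(x)
    assert y.shape == x.shape
    return y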
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_out_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_3,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_mul_1[grid(1048576)](buf3,
primals_1, primals_5, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1
@torch.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill)
class ResidualBlockNoBNNew(nn.Module):
"""Residual block without BN.
It has a style of:
---Conv-ReLU-Conv-+-
|________________|
Args:
num_feat (int): Channel number of intermediate features.
Default: 64.
res_scale (float): Residual scale. Default: 1.
pytorch_init (bool): If set to True, use pytorch default init,
otherwise, use default_init_weights. Default: False.
"""
def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
super(ResidualBlockNoBNNew, self).__init__()
self.res_scale = res_scale
self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
if not pytorch_init:
default_init_weights([self.conv1, self.conv2], 0.1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ljzycmd/SimDeblur | ResidualBlockNoBN | false | 15,979 | [
"MIT"
] | 190 | dd2f60c41176b75c4eaf80d740f547c206aa8227 | https://github.com/ljzycmd/SimDeblur/tree/dd2f60c41176b75c4eaf80d740f547c206aa8227 |
_GRU_ODE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4f/c4f6c75k7irztm2jhnqp7o72nlug4e57ksu7cvtpagj3tabsb65t.py
# Topologically Sorted Source Nodes: [r, r_1, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward]
# Source node to ATen node mapping:
# mul => mul
# r => add
# r_1 => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_5), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_4), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x2), xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp5
tmp10 = tmp5 * tmp9
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/m4/cm4iqqyruxiu5sccf4ac3y52papxjbureepfrvk3xk4fcjvjkda3.py
# Topologically Sorted Source Nodes: [z, z_1, g, g_1, sub, sub_1, mul_1], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.rsub, aten.sub, aten.mul]
# Source node to ATen node mapping:
# g => add_2
# g_1 => tanh
# mul_1 => mul_1
# sub => sub
# sub_1 => sub_1
# z => add_1
# z_1 => sigmoid_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %view_11), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%tanh, %primals_5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + (x2), xmask)
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp8 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x2), xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = 1.0
tmp13 = tmp12 - tmp5
tmp15 = tmp11 - tmp14
tmp16 = tmp13 * tmp15
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r, r_1, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0.run(buf0, buf1, primals_4, primals_5, buf6, buf10, 256, grid=grid(256), stream=stream0)
del primals_4
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [z, z_1, g, g_1, sub, sub_1, mul_1], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.rsub, aten.sub, aten.mul]
triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1.run(buf4, buf8, buf3, primals_8, buf7, primals_11, primals_5, buf9, 256, grid=grid(256), stream=stream0)
del buf3
del buf7
del primals_11
del primals_8
return (buf9, primals_5, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf6, (64, 4), (4, 1), 0), buf8, primals_10, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class _GRU_ODE(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(_GRU_ODE, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.W_r = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_z = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_h = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.U_r = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_z = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_h = torch.nn.Linear(hidden_channels, hidden_channels)
def extra_repr(self):
        return 'input_channels: {}, hidden_channels: {}'.format(
            self.input_channels, self.hidden_channels)
def forward(self, x, h):
r = self.W_r(x) + self.U_r(h)
r = r.sigmoid()
z = self.W_z(x) + self.U_z(h)
z = z.sigmoid()
g = self.W_h(x) + self.U_h(r * h)
g = g.tanh()
return (1 - z) * (g - h)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4}]
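# A minimal eager-mode sanity check, not part of the repository code: it spells
# out the GRU-ODE equations the forward above implements, (1 - z) * (g - h) with
# r = sigmoid(W_r x + U_r h), z = sigmoid(W_z x + U_z h), g = tanh(W_h x + U_h(r * h)).
# The helper name `check_gru_ode_eager` is illustrative only.
def check_gru_ode_eager():
    torch.manual_seed(0)
    cell = _GRU_ODE(input_channels=4, hidden_channels=4)
    x, h = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    r = torch.sigmoid(cell.W_r(x) + cell.U_r(h))
    z = torch.sigmoid(cell.W_z(x) + cell.U_z(h))
    g = torch.tanh(cell.W_h(x) + cell.U_h(r * h))
    assert torch.allclose(cell(x, h), (1 - z) * (g - h), atol=1e-6)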
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp5
tmp10 = tmp5 * tmp9
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
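# Annotation (not generator output): the kernel above fuses the reset gate
# r = sigmoid(W_r x + U_r h + b_r) and stores two tensors -- r * h (the input
# to U_h) and r * (1 - r), the sigmoid derivative saved for the backward pass.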
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp8 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = 1.0
tmp13 = tmp12 - tmp5
tmp15 = tmp11 - tmp14
tmp16 = tmp13 * tmp15
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
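# Annotation (not generator output): this kernel fuses the update gate
# z = sigmoid(.), the candidate g = tanh(.), and the output (1 - z) * (g - h)
# into one pass, overwriting the matmul buffers in place with z and g so the
# backward pass can reuse them.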
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0,
buf1, primals_4, primals_5, buf6, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf7 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1[grid(256)](buf4,
buf8, buf3, primals_8, buf7, primals_11, primals_5, buf9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf7
del primals_11
del primals_8
return buf9, primals_5, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), buf8, primals_10, buf10
class _GRU_ODENew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(_GRU_ODENew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.W_r = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_z = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_h = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.U_r = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_z = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_h = torch.nn.Linear(hidden_channels, hidden_channels)
def extra_repr(self):
        return 'input_channels: {}, hidden_channels: {}'.format(
            self.input_channels, self.hidden_channels)
def forward(self, input_0, input_1):
primals_1 = self.W_r.weight
        # primal indices follow first-use order in the traced forward, which
        # interleaves the inputs (primals_2 = x, primals_5 = h) with the weights
        primals_3 = self.U_r.weight
        primals_6 = self.W_z.weight
        primals_7 = self.U_z.weight
        primals_4 = self.U_r.bias
        primals_9 = self.W_h.weight
primals_8 = self.U_z.bias
primals_10 = self.U_h.weight
primals_11 = self.U_h.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
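# Hedged equivalence sketch for the fused path (assumes a CUDA device; the
# helper name is illustrative, not part of the generated module). It recomputes
# the GRU-ODE update from the module's own Linear layers and compares it with
# the Triton-backed forward, exercising the primal-to-parameter mapping above.
def check_fused_matches_eager():
    torch.manual_seed(0)
    m = _GRU_ODENew(input_channels=4, hidden_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    r = torch.sigmoid(m.W_r(x) + m.U_r(h))
    z = torch.sigmoid(m.W_z(x) + m.U_z(h))
    g = torch.tanh(m.W_h(x) + m.U_h(r * h))
    assert torch.allclose(m(x, h), (1 - z) * (g - h), atol=1e-5)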
| lysuk96/rl_representations | _GRU_ODE | false | 15,980 | [
"MIT"
] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
baseRNN_predict | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [obs], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class baseRNN_predict(nn.Module):
def __init__(self, h_size, obs_dim, num_actions, context_input=False):
super(baseRNN_predict, self).__init__()
self.l1 = nn.Linear(h_size, 64)
self.l2 = nn.Linear(64, 128)
self.l3 = nn.Linear(128, obs_dim)
self.apply(weights_init)
def forward(self, h):
h = torch.relu(self.l1(h))
h = torch.relu(self.l2(h))
obs = self.l3(h)
return obs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h_size': 4, 'obs_dim': 4, 'num_actions': 4}]
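# A small shape smoke test, not repository code (the helper name is made up):
# the predictor is a plain MLP applied along the last dimension, so a 4-D input
# of size (..., h_size) comes back as (..., obs_dim); the compiled artifact
# below realizes the same thing by flattening to (64, features) matmuls.
def smoke_test_predictor():
    net = baseRNN_predict(h_size=4, obs_dim=4, num_actions=4)
    h = torch.rand(4, 4, 4, 4)
    out = net(h)
    assert out.shape == (4, 4, 4, 4)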
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
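# Annotation (not generator output): both kernels above fuse the forward ReLU
# with the boolean mask relu(x) <= 0 that aten.threshold_backward consumes, so
# the backward pass can zero gradients without recomputing the activation.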
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class baseRNN_predictNew(nn.Module):
def __init__(self, h_size, obs_dim, num_actions, context_input=False):
super(baseRNN_predictNew, self).__init__()
self.l1 = nn.Linear(h_size, 64)
self.l2 = nn.Linear(64, 128)
self.l3 = nn.Linear(128, obs_dim)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
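# Hedged equivalence sketch (assumes a CUDA device; the helper name is
# illustrative). It compares the Triton-backed forward against the eager
# relu/relu/linear chain built from the same parameters.
def check_predict_fused():
    torch.manual_seed(0)
    m = baseRNN_predictNew(h_size=4, obs_dim=4, num_actions=4).cuda()
    h = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.l3(torch.relu(m.l2(torch.relu(m.l1(h)))))
    assert torch.allclose(m(h), ref, atol=1e-5)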
| lysuk96/rl_representations | baseRNN_predict | false | 15,981 | [
"MIT"
] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
SparseDownSampleClose | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qb/cqbgxo3ah7exgkjgz3p7rm3nr2l2fptl2jmjo3mwd5gksxmhs3ow.py
# Topologically Sorted Source Nodes: [sub, neg, mul, encode_d, max_pool2d, mask_result, d, sub_2, mul_1, d_result], Original ATen: [aten.rsub, aten.neg, aten.mul, aten.sub, aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# d => neg_1
# d_result => sub_3
# encode_d => sub_1
# mask_result => getitem_2
# max_pool2d => _low_memory_max_pool2d_with_offsets
# mul => mul
# mul_1 => mul_1
# neg => neg
# sub => sub
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, 600), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%sub_1, [1, 1], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 600), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_1, %mul_1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = -tmp2
tmp4 = 600.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp8 = -tmp7
tmp9 = tmp2 * tmp4
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, neg, mul, encode_d, max_pool2d, mask_result, d, sub_2, mul_1, d_result], Original ATen: [aten.rsub, aten.neg, aten.mul, aten.sub, aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0.run(buf2, arg0_1, arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class SparseDownSampleClose(nn.Module):
def __init__(self, stride):
super(SparseDownSampleClose, self).__init__()
self.pooling = nn.MaxPool2d(stride, stride)
self.large_number = 600
def forward(self, d, mask):
encode_d = -(1 - mask) * self.large_number - d
d = -self.pooling(encode_d)
mask_result = self.pooling(mask)
d_result = d - (1 - mask_result) * self.large_number
return d_result, mask_result
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'stride': 1}]
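# A tiny worked example, not repository code (the helper name is made up): the
# -(1 - mask) * 600 encoding pushes invalid pixels down to about -600 before
# the max-pool of -d, so each window returns the *smallest* valid depth, i.e.
# the closest surface, and the decode step restores the original scale.
def demo_min_pool_on_valid():
    down = SparseDownSampleClose(stride=2)
    d = torch.tensor([[[[5.0, 2.0], [9.0, 7.0]]]])
    mask = torch.tensor([[[[0.0, 1.0], [1.0, 1.0]]]])  # depth 5.0 is invalid
    d_out, mask_out = down(d, mask)
    assert d_out.item() == 2.0 and mask_out.item() == 1.0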
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = -tmp2
tmp4 = 600.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp8 = -tmp7
tmp9 = tmp2 * tmp4
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + x0, tmp10, xmask)
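# Annotation (not generator output): with stride == 1 the MaxPool2d windows are
# single pixels, so the encode / pool / decode pipeline collapses into this one
# pointwise kernel; its algebra simplifies to d_result = d (up to float32
# rounding from the +/- 600 round trip) and mask_result = mask.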
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0[grid(256)](
buf2, arg0_1, arg1_1, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf2, buf1
class SparseDownSampleCloseNew(nn.Module):
def __init__(self, stride):
super(SparseDownSampleCloseNew, self).__init__()
self.pooling = nn.MaxPool2d(stride, stride)
self.large_number = 600
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
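# Hedged identity check for the fused stride-1 path (assumes a CUDA device; the
# helper name is illustrative). The tolerance absorbs the roughly 600 * eps
# rounding left over from the encode/decode round trip.
def check_stride1_identity():
    m = SparseDownSampleCloseNew(stride=1)
    d = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    d_out, mask_out = m(d, mask)
    assert torch.allclose(d_out, d, atol=1e-3)
    assert torch.equal(mask_out, mask)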
| maciej-3/PENet_ICRA2021 | SparseDownSampleClose | false | 15,982 | [
"MIT"
] | 155 | 40b5b20fb5d64455f8964045204fa9e7629d0c8c | https://github.com/maciej-3/PENet_ICRA2021/tree/40b5b20fb5d64455f8964045204fa9e7629d0c8c |
DynamicWeights | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pp/cppzwhavrbxqanhenab3phph2xb4f22v2zltxf5ldtyeh2jp7igd.py
# Topologically Sorted Source Nodes: [dynamic_filter1_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dynamic_filter1_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((16*r1) + (144*(x0 // 16)) + (x0 % 16)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (9*x0)), tmp11, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yu/cyu5u75oqgplo4p6f33zgdot6wehnhque5kb2bng573l3nmqmq7d.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = (xindex // 9)
y1 = (yindex // 16)
x5 = xindex
y4 = yindex
tmp0 = (-1) + (x2 // 3) + (y0 // 4)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + (x2 % 3) + (y0 % 4)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + y0 + (4*(x2 // 3)) + (16*x3) + (64*y1) + (x2 % 3)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + (36*y4)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b5/cb54eg4mzx64o2qoa4csllnbb5vnvq3ll4d5vwyjy23ujbjs75vm.py
# Topologically Sorted Source Nodes: [out1_1, mul, out], Original ATen: [aten.clone, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# out => add_2
# out1_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %clone_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_clone_mul_2 = async_compile.triton('triton_poi_fused_add_clone_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, YBLOCK])
tmp2 = tl.load(in_ptr1 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp4 = tl.load(in_ptr2 + (x2 + (16*y3)), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2 + (16*y3)), tmp5, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [dynamic_filter1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 9, 4, 4), (144, 16, 4, 1))
buf3 = empty_strided_cuda((64, 9), (9, 1), torch.float32)
# Topologically Sorted Source Nodes: [dynamic_filter1_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(buf0, buf3, 64, 9, grid=grid(64), stream=stream0)
del buf0
buf4 = empty_strided_cuda((4, 16, 4, 9), (576, 36, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf4, 64, 36, grid=grid(64, 36), stream=stream0)
buf5 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 9), (36, 9, 1), 0), reinterpret_tensor(buf3, (64, 9, 1), (9, 1, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out1_1, mul, out], Original ATen: [aten.clone, aten.mul, aten.add]
triton_poi_fused_add_clone_mul_2.run(primals_3, buf5, primals_1, buf6, 16, 16, grid=grid(16, 16), stream=stream0)
return (buf6, primals_1, primals_2, primals_3, buf3, buf5, reinterpret_tensor(buf4, (64, 9, 4), (36, 1, 9), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((9, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class DynamicWeights(nn.Module):
def __init__(self, channels):
super(DynamicWeights, self).__init__()
self.cata = nn.Conv2d(channels, 9, 3, padding=1, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.unfold1 = nn.Unfold(kernel_size=(3, 3), padding=1)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
blur_depth = x
dynamic_filter1 = self.cata(blur_depth)
N, _C, H, W = blur_depth.size()
dynamic_filter1 = self.softmax(dynamic_filter1.permute(0, 2, 3, 1).
contiguous().view(N * H * W, -1))
xd_unfold1 = self.unfold1(blur_depth)
xd_unfold1 = xd_unfold1.contiguous().view(N, blur_depth.size()[1],
9, H * W).permute(0, 3, 1, 2).contiguous().view(N * H * W,
blur_depth.size()[1], 9)
out1 = torch.bmm(xd_unfold1, dynamic_filter1.unsqueeze(2))
out1 = out1.view(N, H, W, blur_depth.size()[1]).permute(0, 3, 1, 2
).contiguous()
out = self.gamma * out1 + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
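# Two quick observations as a sketch, not repository code (the helper name is
# made up): the softmax over 9 taps makes every output pixel a convex
# combination of its 3x3 neighborhood, and because gamma is initialized to
# zero the module starts out as an exact identity mapping.
def check_identity_at_init():
    dw = DynamicWeights(channels=4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(dw(x), x)  # out = 0 * out1 + x at initialization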
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * r1 + 144 * (x0 // 16) + x0 % 16), rmask &
xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 9 * x0), tmp11, rmask & xmask)
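# Annotation (not generator output): one program per output pixel normalizes
# its 9 filter taps with a numerically stable softmax (the row max is
# subtracted before the exponential).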
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = xindex // 9
y1 = yindex // 16
x5 = xindex
y4 = yindex
tmp0 = -1 + x2 // 3 + y0 // 4
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x2 % 3 + y0 % 4
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + y0 + 4 * (x2 // 3) + 16 * x3 + 64 * y1 +
x2 % 3), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0
)
tl.store(out_ptr0 + (x5 + 36 * y4), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_add_clone_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, YBLOCK])
tmp2 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp4 = tl.load(in_ptr2 + (x2 + 16 * y3), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2 + 16 * y3), tmp5, xmask & ymask)
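# Annotation (not generator output): the final kernel broadcasts the scalar
# gamma and fuses the residual out = gamma * out1 + x while permuting out1
# from its (N, H*W, C) bmm layout back to NCHW.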
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 9, 4, 4), (144, 16, 4, 1))
buf3 = empty_strided_cuda((64, 9), (9, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf0, buf3, 64, 9, XBLOCK=32,
num_warps=4, num_stages=1)
del buf0
buf4 = empty_strided_cuda((4, 16, 4, 9), (576, 36, 9, 1), torch.float32
)
triton_poi_fused_clone_1[grid(64, 36)](primals_1, buf4, 64, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 9), (36, 9, 1),
0), reinterpret_tensor(buf3, (64, 9, 1), (9, 1, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clone_mul_2[grid(16, 16)](primals_3, buf5,
primals_1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
num_stages=1)
return (buf6, primals_1, primals_2, primals_3, buf3, buf5,
reinterpret_tensor(buf4, (64, 9, 4), (36, 1, 9), 0))
class DynamicWeightsNew(nn.Module):
def __init__(self, channels):
super(DynamicWeightsNew, self).__init__()
self.cata = nn.Conv2d(channels, 9, 3, padding=1, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.unfold1 = nn.Unfold(kernel_size=(3, 3), padding=1)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_3 = self.gamma
primals_2 = self.cata.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
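# Hedged check of the fused path at initialization (assumes a CUDA device; the
# helper name is illustrative): gamma == 0, so the residual reduces to x.
def check_dynamic_weights_fused():
    torch.manual_seed(0)
    m = DynamicWeightsNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), x)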
| lzrobots/dgmn | DynamicWeights | false | 15,983 | [
"MIT"
] | 54 | 515476b5c6a07dcc3b7a4d2243c541377624bb33 | https://github.com/lzrobots/dgmn/tree/515476b5c6a07dcc3b7a4d2243c541377624bb33 |
DDM_Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4l/c4lbzmi7meb4qwlq5mph3ex6vu2fyemq66xvbq4piu7obxmzvy3n.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x_1 => expm1_1, gt_1, mul_3, mul_5, where_1
# Graph fragment:
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
triton_poi_fused_elu_1 = async_compile.triton('triton_poi_fused_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_2 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
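# Hedged sketch of the in-place epilogue above: triton_poi_fused_tanh_2 adds
# the fc bias (broadcast over the last dimension) and applies tanh to the
# matmul output. The helper name is hypothetical.
def _bias_tanh_reference(mm_out, bias):
    import torch
    return torch.tanh(mm_out + bias)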
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (288, 4), (4, 1))
assert_size_stride(primals_5, (288, ), (1, ))
assert_size_stride(primals_6, (4, 288), (288, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 288), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu]
triton_poi_fused_elu_1.run(buf2, buf3, 18432, grid=grid(18432), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 288), (288, 1), 0), reinterpret_tensor(primals_6, (288, 4), (1, 288), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 288), (288, 1), 0), buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((288, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((288, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 288), (288, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_Encoder(torch.nn.Module):
def __init__(self, obs_space, dim, context_input=False, context_dim=0):
"""
architecture should be input, so that we can pass multiple jobs !
"""
super(DDM_Encoder, self).__init__()
if context_input:
self.linear1 = nn.Linear(obs_space + context_dim, dim)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.linear1(inputs))
x = F.elu(self.linear2(x))
        x = torch.tanh(self.fc(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_space': 4, 'dim': 4}]
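# A minimal usage sketch for the module above (shapes follow get_inputs()/
# get_init_inputs(); the helper name is hypothetical and not part of the
# original repository code):
def _example_ddm_encoder_usage():
    encoder = DDM_Encoder(obs_space=4, dim=4)
    obs = torch.rand([4, 4, 4, 4])
    embedding = encoder(obs)  # tanh keeps outputs in (-1, 1)
    return embedding.shape    # torch.Size([4, 4, 4, 4])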
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (288, 4), (4, 1))
assert_size_stride(primals_5, (288,), (1,))
assert_size_stride(primals_6, (4, 288), (288, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 288), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1),
torch.float32)
triton_poi_fused_elu_1[grid(18432)](buf2, buf3, 18432, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 288), (288, 1), 0),
reinterpret_tensor(primals_6, (288, 4), (1, 288), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 288), (288, 1), 0
), buf5, primals_6, primals_4
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_EncoderNew(torch.nn.Module):
def __init__(self, obs_space, dim, context_input=False, context_dim=0):
"""
architecture should be input, so that we can pass multiple jobs !
"""
super(DDM_EncoderNew, self).__init__()
if context_input:
self.linear1 = nn.Linear(obs_space + context_dim, dim)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.fc.weight
primals_7 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| lysuk96/rl_representations | DDM_Encoder | false | 15,984 | ["MIT"] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
SingleHiddenLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# z_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
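# Hedged eager-mode sketch of the fusion above: the kernel writes both the
# ReLU forward value and the boolean mask that threshold_backward later uses
# to zero gradients where the activation was clamped. The helper name is
# hypothetical.
def _relu_forward_with_mask(x, bias):
    import torch
    out = torch.relu(x + bias)
    grad_mask = out <= 0.0
    return out, grad_mask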
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 128), (128, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 16), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SingleHiddenLayer(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(SingleHiddenLayer, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.linear1 = torch.nn.Linear(hidden_channels, 128)
self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, z):
z = self.linear1(z)
z = torch.relu(z)
z = self.linear2(z)
z = z.view(*z.shape[:-1], self.hidden_channels, self.input_channels)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4}]
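# A minimal usage sketch (helper name hypothetical; shapes follow
# get_inputs()/get_init_inputs() above). The module maps features of size
# hidden_channels to a (hidden_channels x input_channels) matrix per element,
# e.g. as a vector field for a neural CDE:
def _example_single_hidden_layer_usage():
    layer = SingleHiddenLayer(input_channels=4, hidden_channels=4)
    z = torch.rand([4, 4, 4, 4])
    out = layer(z)
    return out.shape  # torch.Size([4, 4, 4, 4, 4])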
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 128), (128, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf3, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 16), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3
class SingleHiddenLayerNew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(SingleHiddenLayerNew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.linear1 = torch.nn.Linear(hidden_channels, 128)
self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| lysuk96/rl_representations | SingleHiddenLayer | false | 15,985 | ["MIT"] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
GeometryFeature | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ht/cht3iys3phvxlbyjckyukc3p63g6utngacelztmjhpv5nwxanb5z.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1, %arg3_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.load(in_ptr2 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tmp12 = tmp8 * tmp11
tmp13 = tl.load(in_ptr3 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp14 = tmp12 - tmp13
tmp15 = tmp5 * tmp14
tmp16 = tl.load(in_ptr4 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp17 = tmp15 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tmp21 = tl.full([1], 8, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr5 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp26 = tmp25 * tmp7
tmp27 = tl.load(in_ptr6 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp28 = tmp27 + tmp10
tmp29 = tmp26 * tmp28
tmp30 = tl.load(in_ptr7 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tmp24 * tmp31
tmp33 = tl.load(in_ptr8 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp23 & xmask, other=0.0)
tmp34 = tmp32 / tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp23, tmp34, tmp35)
tmp37 = tmp0 >= tmp21
tmp38 = tl.full([1], 12, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tl.load(in_ptr0 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp37 & xmask, other=0.0)
tmp41 = tl.where(tmp23, tmp36, tmp40)
tmp42 = tl.where(tmp4, tmp19, tmp41)
tl.store(out_ptr0 + (x3), tmp42, xmask)
''', device_str='cuda')
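# Hedged eager-mode sketch of triton_poi_fused_cat_0: channels 0-3 of the
# output hold z * (0.5*h*(vnorm+1) - ch) / fh, channels 4-7 hold
# z * (0.5*w*(unorm+1) - cw) / fw, and channels 8-11 copy z, i.e. a pinhole
# back-projection fused with the concatenation. The helper name is
# hypothetical; it mirrors the source module's forward below.
def _geometry_cat_reference(z, vnorm, unorm, h, w, ch, cw, fh, fw):
    import torch
    x = z * (0.5 * h * (vnorm + 1) - ch) / fh
    y = z * (0.5 * w * (unorm + 1) - cw) / fw
    return torch.cat((x, y, z), 1)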
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg7_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg8_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg3_1, arg0_1, arg1_1, arg2_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, buf0, 768, grid=grid(768), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
del arg7_1
del arg8_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg6_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg7_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg8_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class GeometryFeature(nn.Module):
def __init__(self):
super(GeometryFeature, self).__init__()
def forward(self, z, vnorm, unorm, h, w, ch, cw, fh, fw):
x = z * (0.5 * h * (vnorm + 1) - ch) / fh
y = z * (0.5 * w * (unorm + 1) - cw) / fw
return torch.cat((x, y, z), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
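# A minimal usage sketch (helper name hypothetical). All nine inputs are
# broadcast-compatible maps here; judging by the variable names, h/w carry
# image height/width, (ch, cw) the principal point and (fh, fw) the focal
# lengths, so the output stacks camera-frame x, y and depth z along dim 1:
def _example_geometry_feature_usage():
    geo = GeometryFeature()
    xyz = geo(*get_inputs())
    return xyz.shape  # torch.Size([4, 12, 4, 4])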
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.load(in_ptr2 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tmp12 = tmp8 * tmp11
tmp13 = tl.load(in_ptr3 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0
)
tmp14 = tmp12 - tmp13
tmp15 = tmp5 * tmp14
tmp16 = tl.load(in_ptr4 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0
)
tmp17 = tmp15 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tmp21 = tl.full([1], 8, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr5 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp26 = tmp25 * tmp7
tmp27 = tl.load(in_ptr6 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp28 = tmp27 + tmp10
tmp29 = tmp26 * tmp28
tmp30 = tl.load(in_ptr7 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tmp24 * tmp31
tmp33 = tl.load(in_ptr8 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp34 = tmp32 / tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp23, tmp34, tmp35)
tmp37 = tmp0 >= tmp21
tl.full([1], 12, tl.int64)
tmp40 = tl.load(in_ptr0 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp37 &
xmask, other=0.0)
tmp41 = tl.where(tmp23, tmp36, tmp40)
tmp42 = tl.where(tmp4, tmp19, tmp41)
tl.store(out_ptr0 + x3, tmp42, xmask)
def call(args):
(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1
) = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg7_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg8_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](arg3_1, arg0_1, arg1_1, arg2_1,
arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, buf0, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
del arg7_1
del arg8_1
return buf0,
class GeometryFeatureNew(nn.Module):
def __init__(self):
super(GeometryFeatureNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6, input_7, input_8):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
arg7_1 = input_7
arg8_1 = input_8
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1,
arg6_1, arg7_1, arg8_1])
return output[0]
| maciej-3/PENet_ICRA2021 | GeometryFeature | false | 15,986 | ["MIT"] | 155 | 40b5b20fb5d64455f8964045204fa9e7629d0c8c | https://github.com/maciej-3/PENet_ICRA2021/tree/40b5b20fb5d64455f8964045204fa9e7629d0c8c |
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xk/cxkmdbptgejnxqvvct3m5fm7bc7kxfsd3om2bbyghjd5mlsywsie.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gd/cgdtd7uw2iemby2kfb22fx3vkhdbrpyx2y2l6nq45fmox3ad7stv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qs/cqsyda2m63ct5ijcfgcipyyfn273chi5d3kmpjuf5asa7h4wdpdv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
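# Hedged sketch of the two softmax kernels above: triton_poi_fused__softmax_2
# subtracts the row-wise max before exponentiating (for numerical stability)
# and triton_poi_fused__softmax_3 normalizes by the row-wise sum. Eager-mode
# equivalent (helper name hypothetical):
def _softmax_reference(scores):
    import torch
    shifted = scores - scores.max(dim=1, keepdim=True).values
    exp = shifted.exp()
    return exp / exp.sum(dim=1, keepdim=True)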
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (100, 4), (4, 1))
assert_size_stride(primals_3, (1, 100), (100, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((16, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 100), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 100), (400, 100, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, 1600, grid=grid(1600), stream=stream0)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 100), (100, 1), 0), reinterpret_tensor(primals_3, (100, 1), (1, 100), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [M], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out=buf6)
return (buf6, reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, buf5, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SelfAttention(nn.Module):
def __init__(self, hidden_size, attention_size=100, n_attention_heads=1):
super().__init__()
self.hidden_size = hidden_size
self.attention_size = attention_size
self.n_attention_heads = n_attention_heads
self.W1 = nn.Linear(hidden_size, attention_size, bias=False)
self.W2 = nn.Linear(attention_size, n_attention_heads, bias=False)
def forward(self, hidden):
hidden = hidden.transpose(0, 1)
x = torch.tanh(self.W1(hidden))
x = F.softmax(self.W2(x), dim=1)
A = x.transpose(1, 2)
M = A @ hidden
return M, A
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
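# A minimal usage sketch (helper name hypothetical). hidden is expected as
# (seq_len, batch, hidden_size), e.g. the output of an nn.GRU/nn.LSTM; M is
# the attended sentence matrix and A the attention weights, one row per head:
def _example_self_attention_usage():
    attn = SelfAttention(hidden_size=4)
    hidden = torch.rand([4, 4, 4])
    M, A = attn(hidden)
    return M.shape, A.shape  # torch.Size([4, 1, 4]) each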
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (100, 4), (4, 1))
assert_size_stride(primals_3, (1, 100), (100, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 100), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 100), (400, 100, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(1600)](buf2, 1600, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 100), (100, 1), 0),
reinterpret_tensor(primals_3, (100, 1), (1, 100), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out
=buf6)
return buf6, reinterpret_tensor(buf5, (4, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), buf2, buf5, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0
), primals_3
class SelfAttentionNew(nn.Module):
def __init__(self, hidden_size, attention_size=100, n_attention_heads=1):
super().__init__()
self.hidden_size = hidden_size
self.attention_size = attention_size
self.n_attention_heads = n_attention_heads
self.W1 = nn.Linear(hidden_size, attention_size, bias=False)
self.W2 = nn.Linear(attention_size, n_attention_heads, bias=False)
def forward(self, input_0):
primals_2 = self.W1.weight
primals_3 = self.W2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
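# Minimal usage sketch (an illustrative assumption: a CUDA device is available
# and the input matches the (4, 4, 4) shape asserted in call()):
#   model = SelfAttentionNew(hidden_size=4).cuda()
#   context, attn_weights = model(torch.rand(4, 4, 4, device='cuda'))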
| maltevogl/CVDD-PyTorch | SelfAttention | false | 15,987 | [
"MIT"
] | 48 | 9299894720a8d3d0a329d92c9d2702f43112ff63 | https://github.com/maltevogl/CVDD-PyTorch/tree/9299894720a8d3d0a329d92c9d2702f43112ff63 |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# h => mul_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
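# Note: 0.7978845608028654 in the kernel above is sqrt(2/pi), so the fused
# kernel evaluates the tanh GELU approximation
#   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# elementwise, matching the Python gelu() reference defined later in this entry.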
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, alpha=1, beta=1, out=buf2)
del primals_4
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
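# A sketch of a self-check, assuming a recent PyTorch where F.gelu accepts
# approximate='tanh' (hypothetical usage, not part of the original module):
#   import torch.nn.functional as F
#   x = torch.randn(8)
#   torch.testing.assert_close(gelu(x), F.gelu(x, approximate='tanh'))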
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
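# Conv1D above is the GPT-2-style "transposed" linear layer: its weight has
# shape (nx, nf), so forward computes x @ W + b rather than x @ W.T + b as
# nn.Linear would.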
class MLP(nn.Module):
def __init__(self, n_state, config):
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_state': 4, 'config': _mock_config(n_embd=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), primals_5, alpha=1, beta=1, out=buf2)
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_1, (4, 64), (1, 4), 0)
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
class MLPNew(nn.Module):
def __init__(self, n_state, config):
super(MLPNew, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, input_0):
primals_3 = self.c_fc.weight
primals_2 = self.c_fc.bias
primals_5 = self.c_proj.weight
primals_4 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
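# Minimal usage sketch (assumes a CUDA device; the config mirrors
# get_init_inputs above, using the paritybench helper):
#   from _paritybench_helpers import _mock_config
#   model = MLPNew(n_state=4, config=_mock_config(n_embd=4)).cuda()
#   out = model(torch.rand(4, 4, 4, 4, device='cuda'))  # output keeps the input shape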
| mandaltanmoy1938/VisualGPT | MLP | false | 15,988 | [
"MIT"
] | 86 | 9ba78948282fdca502d5030f4eccc3df562982c3 | https://github.com/mandaltanmoy1938/VisualGPT/tree/9ba78948282fdca502d5030f4eccc3df562982c3 |
FC_Q | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# q => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
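# The kernel above fuses the linear layer's bias add with ReLU in place and
# additionally stores a boolean mask (activation <= 0) that autograd's
# threshold_backward uses to zero gradients on the backward pass.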
# kernel path: runs/run_shard_0/inductor_cache/sb/csbqfhl3tbhobxxibww6rnv4q33jyajqsvetse4kiun22xct43oo.py
# Topologically Sorted Source Nodes: [i_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# i_2 => relu_4
# Graph fragment:
# %relu_4 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/h4/ch4hu6nfniyge6mjev7hxoom67hh6g5wt742eougy6iak3mso6oo.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax, aten.threshold_backward]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu_4, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_4, %amax), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused__log_softmax_threshold_backward_2 = async_compile.triton('triton_poi_fused__log_softmax_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_threshold_backward_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = tmp0 <= tmp9
tl.store(out_ptr0 + (x3), tmp8, xmask)
tl.store(out_ptr1 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pu/cpuo34qggtttrulwolbrgtcihegg5pnmq65f46vdbaenzdat4oka.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
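# Kernels 2 and 3 above implement log-softmax in the standard numerically
# stable form: kernel 2 computes x - max(x) per row (and also emits the ReLU
# backward mask), then kernel 3 subtracts log(sum(exp(x - max))), i.e.
#   log_softmax(x) = x - max(x) - log(sum(exp(x - max(x)))).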
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (128, 4), (4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (4, 128), (128, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf17, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf3 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [i], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_7, buf15, 8192, grid=grid(8192), stream=stream0)
del primals_7
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf5 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [i_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_9, buf14, 8192, grid=grid(8192), stream=stream0)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0), reinterpret_tensor(primals_10, (128, 4), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [i_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf8, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf9, primals_5, buf16, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128), (128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax, aten.threshold_backward]
triton_poi_fused__log_softmax_threshold_backward_2.run(buf8, buf11, buf13, 256, grid=grid(256), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_3.run(buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf11
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf12, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(buf6, (64, 128), (128, 1), 0), reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12, buf13, primals_10, buf14, primals_8, buf15, buf16, primals_4, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FC_Q(nn.Module):
def __init__(self, state_dim, num_actions, num_nodes=128):
super(FC_Q, self).__init__()
self.q1 = nn.Linear(state_dim, num_nodes)
self.q2 = nn.Linear(num_nodes, num_nodes)
self.q3 = nn.Linear(num_nodes, num_actions)
self.i1 = nn.Linear(state_dim, num_nodes)
self.i2 = nn.Linear(num_nodes, num_nodes)
self.i3 = nn.Linear(num_nodes, num_actions)
def forward(self, state):
q = F.relu(self.q1(state))
q = F.relu(self.q2(q))
i = F.relu(self.i1(state))
i = F.relu(self.i2(i))
i = F.relu(self.i3(i))
return self.q3(q), F.log_softmax(i, dim=1), i
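# forward returns three values: the Q-head output q3(q), the log-probabilities
# of the imitation head (log_softmax over dim=1, the second axis of the 4-D
# test input), and the raw pre-softmax activations i.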
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'num_actions': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_threshold_backward_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = tmp0 <= tmp9
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 4), (4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (4, 128), (128, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf17, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf3
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf4,
primals_7, buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6,
primals_9, buf14, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_10, (128, 4), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_relu_1[grid(256)](buf8, primals_11, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf16 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf9,
primals_5, buf16, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__log_softmax_threshold_backward_2[grid(256)](buf8,
buf11, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_3[grid(256)](buf11, buf12, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf12, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12,
buf13, primals_10, buf14, primals_8, buf15, buf16, primals_4, buf17)
class FC_QNew(nn.Module):
def __init__(self, state_dim, num_actions, num_nodes=128):
super(FC_QNew, self).__init__()
self.q1 = nn.Linear(state_dim, num_nodes)
self.q2 = nn.Linear(num_nodes, num_nodes)
self.q3 = nn.Linear(num_nodes, num_actions)
self.i1 = nn.Linear(state_dim, num_nodes)
self.i2 = nn.Linear(num_nodes, num_nodes)
self.i3 = nn.Linear(num_nodes, num_actions)
def forward(self, input_0):
primals_1 = self.q1.weight
primals_2 = self.q1.bias
primals_4 = self.q2.weight
primals_5 = self.q2.bias
primals_10 = self.q3.weight
primals_11 = self.q3.bias
primals_6 = self.i1.weight
primals_7 = self.i1.bias
primals_8 = self.i2.weight
primals_9 = self.i2.bias
primals_12 = self.i3.weight
primals_13 = self.i3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1], output[2]
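# Minimal usage sketch (an illustrative assumption: inputs live on CUDA and
# match the shapes asserted in call()):
#   net = FC_QNew(state_dim=4, num_actions=4).cuda()
#   q, log_pi, i = net(torch.rand(4, 4, 4, 4, device='cuda'))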
| lysuk96/rl_representations | FC_Q | false | 15,989 | [
"MIT"
] | 438 | 19de69305e40c9b3a1d746a7af26d232c9fb3f6f | https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f |
KLDivLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wi/cwi4xqpfpnt3svnpqfks5gncoee7olup7zijkszd57ervj23vwcs.py
# Topologically Sorted Source Nodes: [target_data], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# target_data => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 3), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.3333333333333333
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
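# Note how the temperature T = 3 from the Python reference is folded in: the
# kernel multiplies the max-shifted logits by 0.3333333333333333 (= 1/T)
# before exponentiating, which is equivalent to softmax(label / T) because
# softmax is invariant to subtracting a per-row constant.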
# kernel path: runs/run_shard_0/inductor_cache/p4/cp4pcq7hvucmudh3cjqfadxkp23n2aaz7jyv57eaqdjhiubi2eiw.py
# Topologically Sorted Source Nodes: [target_data, target_data_1], Original ATen: [aten._softmax, aten.add]
# Source node to ATen node mapping:
# target_data => div_2, sum_2
# target_data_1 => add
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, 1e-07), kwargs = {})
triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mn/cmnb4msooda3qbyhqyqsbswnm6ocs4xjdqzr4hdtd6y4qbclco7b.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 3), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.3333333333333333
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pu/cpuo34qggtttrulwolbrgtcihegg5pnmq65f46vdbaenzdat4oka.py
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# predict => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [target_data], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [target_data, target_data_1], Original ATen: [aten._softmax, aten.add]
triton_poi_fused__softmax_add_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [predict], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_3.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.transforms import *
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class KLDivLoss(nn.Module):
def __init__(self):
super(KLDivLoss, self).__init__()
def forward(self, pred, label):
T = 3
predict = F.log_softmax(pred / T, dim=1)
target_data = F.softmax(label / T, dim=1)
target_data = target_data + 10 ** -7
target = Variable(target_data.data, requires_grad=False)
loss = T * T * ((target * (target.log() - predict)).sum(1).sum() /
target.size()[0])
return loss
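# Note (editor's): with p = softmax(label / T) and q = log_softmax(pred / T),
# the loss above is T^2 * sum(p * (log p - q)) / batch_size -- equivalent, up to
# the 1e-7 smoothing term, to T * T * F.kl_div(q, p, reduction='sum') / pred.size(0).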
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torchvision.transforms import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.3333333333333333
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.3333333333333333
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_1[grid(256)](buf0, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_2[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_3[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
return buf1, buf3
class KLDivLossNew(nn.Module):
def __init__(self):
super(KLDivLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
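# Note: call() returns (buf1, buf3) = (softmax(label/T) + 1e-7, log_softmax(pred/T));
# the scalar KD-loss reduction is not part of the compiled graph, so this forward
# returns the smoothed target probabilities rather than the loss value.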
| mangye16/Cross-Modal-Re-ID-baseline | KLDivLoss | false | 15,990 | [
"MIT"
] | 249 | 26bc0ce088eb97867ff489dceda386b8092b9fde | https://github.com/mangye16/Cross-Modal-Re-ID-baseline/tree/26bc0ce088eb97867ff489dceda386b8092b9fde |
DRS | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yy/cyy4prolabwuzhygpsdjg736eei4sutv5ysoa7u2aebay7h26l6t.py
# Topologically Sorted Source Nodes: [x, adaptive_max_pool2d], Original ATen: [aten.relu, aten.adaptive_max_pool2d]
# Source node to ATen node mapping:
# adaptive_max_pool2d => adaptive_max_pool2d
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %adaptive_max_pool2d : [num_users=1] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%relu, [1, 1]), kwargs = {})
triton_poi_fused_adaptive_max_pool2d_relu_0 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = triton_helpers.maximum(tmp4, tmp2)
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = triton_helpers.maximum(tmp7, tmp5)
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = triton_helpers.maximum(tmp10, tmp8)
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = triton_helpers.maximum(tmp13, tmp11)
tmp16 = triton_helpers.maximum(tmp1, tmp15)
tmp17 = triton_helpers.maximum(tmp16, tmp14)
tmp19 = triton_helpers.maximum(tmp1, tmp18)
tmp20 = triton_helpers.maximum(tmp19, tmp17)
tmp22 = triton_helpers.maximum(tmp1, tmp21)
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp25 = triton_helpers.maximum(tmp1, tmp24)
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp28 = triton_helpers.maximum(tmp1, tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp31 = triton_helpers.maximum(tmp1, tmp30)
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp34 = triton_helpers.maximum(tmp1, tmp33)
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp37 = triton_helpers.maximum(tmp1, tmp36)
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp40 = triton_helpers.maximum(tmp1, tmp39)
tmp41 = triton_helpers.maximum(tmp40, tmp38)
tmp43 = triton_helpers.maximum(tmp1, tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp41)
tmp46 = triton_helpers.maximum(tmp1, tmp45)
tmp47 = triton_helpers.maximum(tmp46, tmp44)
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
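# Editor's note: the kernel above fuses ReLU with the global max-pool: each of the
# 16 program elements owns one (batch, channel) pair and takes a running
# maximum(0, x) over its 16 spatial taps. Since max(relu(a), relu(b)) ==
# relu(max(a, b)), clamping every tap at 0 inside the running max matches the
# eager module's relu-then-AdaptiveMaxPool2d(1) order exactly.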
# kernel path: runs/run_shard_0/inductor_cache/zz/czz2hfxzpqqtim7boxnqdhjgtlrj2riwt7wunqep3tpcjpkef4re.py
# Topologically Sorted Source Nodes: [x, mul, x_1], Original ATen: [aten.relu, aten.mul, aten.minimum]
# Source node to ATen node mapping:
# mul => mul
# x => relu
# x_1 => minimum
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, 4), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%relu, %mul), kwargs = {})
triton_poi_fused_minimum_mul_relu_1 = async_compile.triton('triton_poi_fused_minimum_mul_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_minimum_mul_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_minimum_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.minimum(tmp2, tmp5)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x, adaptive_max_pool2d], Original ATen: [aten.relu, aten.adaptive_max_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_relu_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mul, x_1], Original ATen: [aten.relu, aten.mul, aten.minimum]
triton_poi_fused_minimum_mul_relu_1.run(arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DRS(nn.Module):
"""
DRS non-learnable setting
    hyperparameter: O (yes), additional trainable parameters: X (no)
"""
def __init__(self, delta):
super(DRS, self).__init__()
self.relu = nn.ReLU()
self.delta = delta
self.global_max_pool = nn.AdaptiveMaxPool2d(1)
def forward(self, x):
b, c, _, _ = x.size()
x = self.relu(x)
""" 1: max extractor """
x_max = self.global_max_pool(x).view(b, c, 1, 1)
x_max = x_max.expand_as(x)
""" 2: suppression controller"""
control = self.delta
""" 3: suppressor"""
x = torch.min(x, x_max * control)
return x
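# Editor's sketch (equivalent one-liner, not part of the original module):
def _drs_reference(x, delta):
    r = torch.relu(x)
    # clip every activation at delta times its per-(batch, channel) spatial max
    return torch.minimum(r, delta * r.amax(dim=(2, 3), keepdim=True))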
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_relu_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = triton_helpers.maximum(tmp4, tmp2)
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = triton_helpers.maximum(tmp7, tmp5)
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = triton_helpers.maximum(tmp10, tmp8)
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = triton_helpers.maximum(tmp13, tmp11)
tmp16 = triton_helpers.maximum(tmp1, tmp15)
tmp17 = triton_helpers.maximum(tmp16, tmp14)
tmp19 = triton_helpers.maximum(tmp1, tmp18)
tmp20 = triton_helpers.maximum(tmp19, tmp17)
tmp22 = triton_helpers.maximum(tmp1, tmp21)
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp25 = triton_helpers.maximum(tmp1, tmp24)
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp28 = triton_helpers.maximum(tmp1, tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp31 = triton_helpers.maximum(tmp1, tmp30)
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp34 = triton_helpers.maximum(tmp1, tmp33)
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp37 = triton_helpers.maximum(tmp1, tmp36)
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp40 = triton_helpers.maximum(tmp1, tmp39)
tmp41 = triton_helpers.maximum(tmp40, tmp38)
tmp43 = triton_helpers.maximum(tmp1, tmp42)
tmp44 = triton_helpers.maximum(tmp43, tmp41)
tmp46 = triton_helpers.maximum(tmp1, tmp45)
tmp47 = triton_helpers.maximum(tmp46, tmp44)
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_minimum_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.minimum(tmp2, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_relu_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_minimum_mul_relu_1[grid(256)](arg0_1, buf0, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
return buf1,
class DRSNew(nn.Module):
"""
DRS non-learnable setting
    hyperparameter: O (yes), additional trainable parameters: X (no)
"""
def __init__(self, delta):
super(DRSNew, self).__init__()
self.relu = nn.ReLU()
self.delta = delta
self.global_max_pool = nn.AdaptiveMaxPool2d(1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| manideep1108/DRS | DRS | false | 15,991 | [
"MIT"
] | 62 | 0858c3ffea310e9d504b7c2b06db5f281273df56 | https://github.com/manideep1108/DRS/tree/0858c3ffea310e9d504b7c2b06db5f281273df56 |
VGG16 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rn/crng7m5mguccwv3xvtgv4yl47k24ov5e26h7ejsq2geg3uuvz5og.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
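# Editor's note: triton_poi_fused_0 above (and triton_poi_fused_1..6 below) do no
# arithmetic -- they repack 3x3 convolution weights from contiguous OIHW into
# channels-last OHWI order. The read offset x2 + 9*y3 walks the (out*in, 9)
# contiguous weight; the write offset y0 + 64*x2 + 576*y1 lands at
# (out, spatial, in) with 64 input channels, so the convolutions below can run
# on channels-last tensors.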
# kernel path: runs/run_shard_0/inductor_cache/lf/clf45mspfcg7t5x4om2snxq42eoe4jywsisc72sbpggbkipki6jb.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qn/cqnvlz36e5n74qbwjehi6cgr4dntmtxxsduqflrrittcgu3yf256.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rw/crwxihz2xdn6vknnrjr5if7hyms65a7dv6ub7vsls72ck5xfuwfz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7v/c7vlmp4ptmjjinootrsb47fer72573dvgxb4w77hrarddids2b3i.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yl/cylsyrzru64h3777bghq4brfo5xznorpgywpstgksyzpqwzecdey.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ge/cget5nqcqgmfplthkcx4uyh5p3254jiox3fz5gndtsq6x3tz7htc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 524288
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/is/cisv67wbtayxvweq3zuup7vz5ggkyk7ogfqvdtcenxk32kuw2gah.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
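# Editor's note: this kernel is the matching layout change for the activations:
# it reads the (4, 3, 64, 64) NCHW input at offset x2 + 4096*y3 and writes NHWC
# at offset y0 + 3*x2 + 12288*y1, putting the 3 channels innermost.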
# kernel path: runs/run_shard_0/inductor_cache/dh/cdhxbkfmhejdlidurhhj3sinjzrczc4tfowdrhosuvb6ilr3gfwp.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rv/crv3uzu52jbc4u62gio2klk6cj5xhjt7yazr75tq67kvtteddsn5.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
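# Editor's note: the convolution node itself (aten.convolution in the graph
# fragment above) is dispatched outside this kernel, presumably via
# extern_kernels; this kernel is only the epilogue, reloading the conv output
# in place, adding the per-channel bias (broadcast through x0 = xindex % 64)
# and applying ReLU before writing back to the same buffer (in_out_ptr0).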
# kernel path: runs/run_shard_0/inductor_cache/ln/cln6ukrvrwq2yidze6n7xij67rihdsxpkkrbbuf6ni5zieakmtkx.py
# Topologically Sorted Source Nodes: [conv2d, x, conv2d_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# conv2d_1 => convolution_1
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lu/clux7aevgdsnhtjtdkdp6pwanzhifldlf6muiuvqh227hizpvw4x.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 32
x1 = (xindex // 64) % 32
x0 = xindex % 64
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4160) + x0 + (128*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (128*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-4032) + x0 + (128*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-64) + x0 + (128*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4032 + x0 + (128*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + (x6), tmp51, None)
''', device_str='cuda')
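# Editor's note: this pooling kernel samples a 3x3 window rather than classic
# VGG's 2x2: the taps cover rows/cols 2*x - 1, 2*x, 2*x + 1, i.e. kernel_size=3,
# stride=2, padding=1 (64 -> 32). Out-of-bounds taps load other=float("-inf"),
# so they never win the running maximum.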
# kernel path: runs/run_shard_0/inductor_cache/n3/cn34mbt2rtob3eeqb7butchvtwaa2lxs5ritiirymjwyzcwqeits.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
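# The kernel above only performs the fused bias-add + ReLU epilogue in place;
# the convolution itself is dispatched through extern_kernels.convolution in
# call() below. Eager-mode sketch of the epilogue (assumption: illustrative
# helper, not part of the generated output):
def _reference_bias_relu(conv_out, bias):
    # x0 = xindex % 128 in the kernel picks the channel of the channels-last
    # activation; broadcasting the bias over (1, C, 1, 1) does the same here.
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))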
# kernel path: runs/run_shard_0/inductor_cache/xi/cxidc4r5nvzmgqw6uydniyux5jy6pcxtdrc4ndkkyvb55hucusew.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 16
x1 = (xindex // 128) % 16
x0 = xindex % 128
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4224) + x0 + (256*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (256*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3968) + x0 + (256*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-128) + x0 + (256*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3968 + x0 + (256*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
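# This pooling kernel emits both the pooled values and an int8 tensor that
# encodes the argmax position (0..8) within each 3x3 window for the backward
# pass. A hedged eager-mode equivalent (note that the eager op returns flat
# indices rather than compact window offsets; the helper name is illustrative):
def _reference_pool_with_indices(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=3, stride=2, padding=1,
                        return_indices=True)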
# kernel path: runs/run_shard_0/inductor_cache/r4/cr4cxr5slxie5num5fkjya5y6p2mpesokrymomcbss4ipccdadwk.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fc/cfc56xho3taq6zxujkrbo5vrqblszxprdkpvj7o2qq5rmj57gmwd.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 8
x1 = (xindex // 256) % 8
x0 = xindex % 256
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4352) + x0 + (512*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (512*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3840) + x0 + (512*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-256) + x0 + (512*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3840 + x0 + (512*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/63/c63ymadmqa5pewt6lz2e5vbnqla654yqubhkwemi5viikn2tjwlb.py
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hg/chg3owxajpnnkaztbgulow7nugph3ijagbis6kvfqyk742lqf6wt.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_13 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_17 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 4096) % 8
x1 = (xindex // 512) % 8
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4608) + x6), tmp10, other=float("-inf"))
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x6), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3584) + x6), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-512) + x6), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x6), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3584 + x6), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x6), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4608 + x6), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
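# Unlike the stride-2 pools above, the index math here uses x1/x2 directly
# (no factor of 2), i.e. a stride-1 pool that keeps the 8x8 spatial size.
# This corresponds to pool4 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
# in the VGG16 source below. Sketch (illustrative helper, not generated code):
def _reference_pool4(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=3, stride=1, padding=1,
                        return_indices=True)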
# kernel path: runs/run_shard_0/inductor_cache/4o/c4oe7fptqnoo5uqexm4ny5m22bxvkxe45quom2tjrbsipnjsg6vt.py
# Topologically Sorted Source Nodes: [conv2d_13, x_17], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_17 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_12, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/do/cdoh2i4r6c75ujuvztesjcjyisrddwubb2w27jifo7k5b5co2p7x.py
# Topologically Sorted Source Nodes: [conv2d_14, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_14 => convolution_14
# x_19 => relu_14
# Graph fragment:
# %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_13, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_14, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 1024
y1 = (yindex // 1024)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (1024*x2) + (65536*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (1024*x2) + (65536*y1)), tmp6, xmask)
''', device_str='cuda')
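# Besides the bias-add + ReLU, this last kernel also materializes the boolean
# mask relu(x) <= 0 consumed by aten.threshold_backward, and writes the
# activation back out in contiguous NCHW order (note the (65536, 64, 8, 1)
# strides of the value output in call() below). Eager-mode sketch (assumption:
# illustrative helper, not part of the generated output):
def _reference_relu_with_mask(conv_out, bias):
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return y.contiguous(), y <= 0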
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (1024, ), (1, ))
assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_31, (1024, ), (1, ))
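    # Parameter layout (inferred from the VGG16 definition later in this file):
    # primals_1/2   -> conv1_1        primals_16/17 -> conv4_1
    # primals_3     -> input batch    primals_18/19 -> conv4_2
    # primals_4/5   -> conv1_2        primals_20/21 -> conv4_3
    # primals_6/7   -> conv2_1        primals_22/23 -> conv5_1
    # primals_8/9   -> conv2_2        primals_24/25 -> conv5_2
    # primals_10/11 -> conv3_1        primals_26/27 -> conv5_3
    # primals_12/13 -> conv3_2        primals_28/29 -> fc6
    # primals_14/15 -> conv3_3        primals_30/31 -> fc7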
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_6, buf0, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_8, buf1, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_10, buf2, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_10
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_12, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_12
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_14, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_16, buf5, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_16
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_18, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_18
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_20, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_20
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_22, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_22
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_24, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_24
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_26, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_26
buf11 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_28, buf11, 524288, 9, grid=grid(524288, 9), stream=stream0)
del primals_28
buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(primals_3, buf12, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf13 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(primals_1, buf13, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
del buf12
del buf13
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf15, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf16 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x, conv2d_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(primals_4, buf16, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d, x, conv2d_1], Original ATen: [aten.convolution, aten.relu]
buf17 = extern_kernels.convolution(buf15, buf16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64))
del buf15
del buf16
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d, x, conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf18, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_11.run(buf18, buf19, 262144, grid=grid(262144), stream=stream0)
del buf18
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf21, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf23, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_13.run(buf23, buf24, buf25, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf24, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf27, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf29, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf31, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_15.run(buf31, buf32, buf33, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf32, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf35, primals_17, 131072, grid=grid(131072), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf37, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf39, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.int8)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_17.run(buf39, buf40, buf41, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf40, buf8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf43, primals_23, 131072, grid=grid(131072), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, buf9, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf45, primals_25, 131072, grid=grid(131072), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, buf10, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf47 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [conv2d_12, x_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf47, primals_27, 131072, grid=grid(131072), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf49 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_17], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf49, primals_29, 262144, grid=grid(262144), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf50 = extern_kernels.convolution(buf49, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf51 = empty_strided_cuda((4, 1024, 8, 8), (65536, 64, 8, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_14, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_19.run(buf50, primals_31, buf51, buf52, 4096, 64, grid=grid(4096, 64), stream=stream0)
del buf50
del primals_31
return (buf51, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, primals_30, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf40, buf41, buf43, buf45, buf47, buf49, buf52, )
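# buf51 is the fc7 activation ('x_19') in contiguous NCHW layout; the other
# returned tensors are the reordered weights and saved intermediates (e.g.
# buf52, the ReLU mask) that the autograd backward graph consumes.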
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((1024, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Normalize:
def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
self.mean = mean
self.std = std
def undo(self, imgarr):
proc_img = imgarr.copy()
        proc_img[..., 0] = (self.std[0] * imgarr[..., 0] + self.mean[0]) * 255.0
        proc_img[..., 1] = (self.std[1] * imgarr[..., 1] + self.mean[1]) * 255.0
        proc_img[..., 2] = (self.std[2] * imgarr[..., 2] + self.mean[2]) * 255.0
return proc_img
def __call__(self, img):
imgarr = np.asarray(img)
proc_img = np.empty_like(imgarr, np.float32)
        proc_img[..., 0] = (imgarr[..., 0] / 255.0 - self.mean[0]) / self.std[0]
        proc_img[..., 1] = (imgarr[..., 1] / 255.0 - self.mean[1]) / self.std[1]
        proc_img[..., 2] = (imgarr[..., 2] / 255.0 - self.mean[2]) / self.std[2]
return proc_img
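# Usage sketch for Normalize (assumption: illustrative, not part of the
# original source): __call__ standardizes a uint8 HWC image per channel and
# undo() inverts the transform back to the 0..255 range.
def _normalize_roundtrip_example():
    norm = Normalize()
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    standardized = norm(img)             # (x / 255 - mean) / std
    restored = norm.undo(standardized)   # (std * x + mean) * 255
    return restored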
class BaseNet(nn.Module):
def __init__(self):
super().__init__()
self.normalize = Normalize()
self.NormLayer = nn.BatchNorm2d
self.not_training = []
self.bn_frozen = []
self.from_scratch_layers = []
def _init_weights(self, path_to_weights):
weights_dict = torch.load(path_to_weights)
self.load_state_dict(weights_dict, strict=False)
def fan_out(self):
raise NotImplementedError
def fixed_layers(self):
return self.not_training
def _fix_running_stats(self, layer, fix_params=False):
if isinstance(layer, self.NormLayer):
self.bn_frozen.append(layer)
if fix_params and layer not in self.not_training:
self.not_training.append(layer)
elif isinstance(layer, list):
for m in layer:
self._fix_running_stats(m, fix_params)
else:
for m in layer.children():
self._fix_running_stats(m, fix_params)
def _fix_params(self, layer):
        if isinstance(layer, (nn.Conv2d, self.NormLayer, nn.Linear)):
self.not_training.append(layer)
if isinstance(layer, self.NormLayer):
self.bn_frozen.append(layer)
elif isinstance(layer, list):
for m in layer:
self._fix_params(m)
elif isinstance(layer, nn.Module):
if hasattr(layer, 'weight') or hasattr(layer, 'bias'):
                pass
for m in layer.children():
self._fix_params(m)
def _freeze_bn(self, layer):
if isinstance(layer, self.NormLayer):
layer.eval()
elif isinstance(layer, nn.Module):
for m in layer.children():
self._freeze_bn(m)
def train(self, mode=True):
super().train(mode)
for layer in self.not_training:
if hasattr(layer, 'weight') and layer.weight is not None:
layer.weight.requires_grad = False
if hasattr(layer, 'bias') and layer.bias is not None:
layer.bias.requires_grad = False
elif isinstance(layer, torch.nn.Module):
                pass
for bn_layer in self.bn_frozen:
self._freeze_bn(bn_layer)
def _lr_mult(self):
        return 1.0, 2.0, 10.0, 20.0
def parameter_groups(self, base_lr, wd):
w_old, b_old, w_new, b_new = self._lr_mult()
        groups = (
            {'params': [], 'weight_decay': wd, 'lr': w_old * base_lr},
            {'params': [], 'weight_decay': 0.0, 'lr': b_old * base_lr},
            {'params': [], 'weight_decay': wd, 'lr': w_new * base_lr},
            {'params': [], 'weight_decay': 0.0, 'lr': b_new * base_lr},
        )
fixed_layers = self.fixed_layers()
for m in self.modules():
if m in fixed_layers:
continue
            if isinstance(m, (nn.Conv2d, nn.Linear, self.NormLayer)):
if m.weight is not None:
if m in self.from_scratch_layers:
groups[2]['params'].append(m.weight)
else:
groups[0]['params'].append(m.weight)
if m.bias is not None:
if m in self.from_scratch_layers:
groups[3]['params'].append(m.bias)
else:
groups[1]['params'].append(m.bias)
elif hasattr(m, 'weight'):
                pass
return groups
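# Usage sketch (assumption: illustrative, not part of the original source):
# the four groups plug directly into an optimizer, giving pretrained weights,
# pretrained biases, from-scratch weights and from-scratch biases their own
# learning rates (the 1x/2x/10x/20x multipliers from _lr_mult).
def _optimizer_example(net, base_lr=0.001, wd=0.0005):
    import torch.optim as optim
    return optim.SGD(net.parameter_groups(base_lr, wd), lr=base_lr,
                     momentum=0.9)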
class VGG16(BaseNet):
def __init__(self, fc6_dilation=1):
super(VGG16, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
        self.fc6 = nn.Conv2d(512, 1024, 3, padding=fc6_dilation, dilation=fc6_dilation)
self.drop6 = nn.Dropout2d(p=0.5)
self.fc7 = nn.Conv2d(1024, 1024, 1)
self._fix_params([self.conv1_1, self.conv1_2])
def fan_out(self):
return 1024
def forward(self, x):
return self.forward_as_dict(x)['conv6']
def forward_as_dict(self, x):
x = F.relu(self.conv1_1(x), inplace=True)
x = F.relu(self.conv1_2(x), inplace=True)
x = self.pool1(x)
x = F.relu(self.conv2_1(x), inplace=True)
x = F.relu(self.conv2_2(x), inplace=True)
x = self.pool2(x)
x = F.relu(self.conv3_1(x), inplace=True)
x = F.relu(self.conv3_2(x), inplace=True)
x = F.relu(self.conv3_3(x), inplace=True)
conv3 = x
x = self.pool3(x)
x = F.relu(self.conv4_1(x), inplace=True)
x = F.relu(self.conv4_2(x), inplace=True)
x = F.relu(self.conv4_3(x), inplace=True)
x = self.pool4(x)
x = F.relu(self.conv5_1(x), inplace=True)
x = F.relu(self.conv5_2(x), inplace=True)
x = F.relu(self.conv5_3(x), inplace=True)
x = F.relu(self.fc6(x), inplace=True)
x = self.drop6(x)
x = F.relu(self.fc7(x), inplace=True)
conv6 = x
        return {'conv3': conv3, 'conv6': conv6}
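# Usage sketch (assumption: illustrative, not part of the original source):
# with a 64x64 input the three stride-2 pools shrink the spatial size to 8x8,
# so 'conv6' comes out as (N, 1024, 8, 8) and 'conv3' as (N, 256, 16, 16).
def _vgg16_forward_example():
    net = VGG16()
    out = net.forward_as_dict(torch.rand(4, 3, 64, 64))
    return out['conv3'].shape, out['conv6'].shape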
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 32
x1 = xindex // 64 % 32
x0 = xindex % 64
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4160 + x0 + 128 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 128 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-4032 + x0 + 128 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-64 + x0 + 128 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x5), tmp33, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4032 + x0 + 128 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + x6, tmp51, None)
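# Hedged note on the pooling kernels in this file: each one unrolls a 3x3
# window (stride and padding per the matching nn.MaxPool2d) over a
# channels-last layout. The boolean tmp predicates are per-tap boundary
# masks, out-of-range taps load -inf, and a chain of
# triton_helpers.maximum() calls reduces the window; the *_with_indices
# variants additionally record the winning tap as an int8 index for the
# backward pass.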
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 16
x1 = xindex // 128 % 16
x0 = xindex % 128
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4224 + x0 + 256 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 256 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3968 + x0 + 256 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-128 + x0 + 256 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x5), tmp33, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 8
x1 = xindex // 256 % 8
x0 = xindex % 256
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4352 + x0 + 512 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 512 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3840 + x0 + 512 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-256 + x0 + 512 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x5), tmp33, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3840 + x0 + 512 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4096 % 8
x1 = xindex // 512 % 8
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4608 + x6), tmp10, other=float('-inf'))
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x6), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3584 + x6), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-512 + x6), tmp30, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3584 + x6), tmp43, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x6), tmp46, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4608 + x6), tmp49, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 1024
y1 = yindex // 1024
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 1024 * x2 + 65536 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 1024 * x2 + 65536 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (1024,), (1,))
assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_31, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
get_raw_stream(0)
        triton_poi_fused_0[grid(8192, 9)](primals_6, buf0, 8192, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_1[grid(16384, 9)](primals_8, buf1, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(32768, 9)](primals_10, buf2, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(65536, 9)](primals_12, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(65536, 9)](primals_14, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_4[grid(131072, 9)](primals_16, buf5, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_5[grid(262144, 9)](primals_18, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_5[grid(262144, 9)](primals_20, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_5[grid(262144, 9)](primals_22, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_5[grid(262144, 9)](primals_24, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_5[grid(262144, 9)](primals_26, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf11 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_6[grid(524288, 9)](primals_28, buf11, 524288, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_28
buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
torch.float32)
triton_poi_fused_convolution_7[grid(12, 4096)](primals_3, buf12, 12,
4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf13 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
triton_poi_fused_convolution_8[grid(192, 9)](primals_1, buf13, 192,
9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf14 = extern_kernels.convolution(buf12, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
del buf12
del buf13
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
        buf16 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
triton_poi_fused_convolution_relu_10[grid(4096, 9)](primals_4,
buf16, 4096, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf17 = extern_kernels.convolution(buf15, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64))
del buf15
del buf16
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_9[grid(1048576)](buf18, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_11[grid(262144)](buf18,
buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del buf18
buf20 = extern_kernels.convolution(buf19, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_12[grid(524288)](buf21, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_12[grid(524288)](buf23, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(131072)](buf23,
buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf24, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_14[grid(262144)](buf27,
primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf28 = extern_kernels.convolution(buf27, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_14[grid(262144)](buf29,
primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf30 = extern_kernels.convolution(buf29, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_14[grid(262144)](buf31,
primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(65536)](buf31,
buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf34 = extern_kernels.convolution(buf32, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_16[grid(131072)](buf35,
primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf36 = extern_kernels.convolution(buf35, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_16[grid(131072)](buf37,
primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf38 = extern_kernels.convolution(buf37, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_16[grid(131072)](buf39,
primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(131072)](buf39,
buf40, buf41, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf42 = extern_kernels.convolution(buf40, buf8, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_16[grid(131072)](buf43,
primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf44 = extern_kernels.convolution(buf43, buf9, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_16[grid(131072)](buf45,
primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf46 = extern_kernels.convolution(buf45, buf10, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_16[grid(131072)](buf47,
primals_27, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf48 = extern_kernels.convolution(buf47, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_18[grid(262144)](buf49,
primals_29, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_29
buf50 = extern_kernels.convolution(buf49, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf51 = empty_strided_cuda((4, 1024, 8, 8), (65536, 64, 8, 1),
torch.float32)
buf52 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_19[grid(4096, 64)](
            buf50, primals_31, buf51, buf52, 4096, 64, XBLOCK=32,
            YBLOCK=32, num_warps=4, num_stages=1)
del buf50
del primals_31
return (buf51, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, primals_30, buf19, buf21, buf23, buf24, buf25,
buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf40,
buf41, buf43, buf45, buf47, buf49, buf52)
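# call() returns the fused conv+ReLU output of fc7 (buf51) first; the
# trailing buffers appear to be the channels-last weight copies and saved
# intermediate activations that autograd needs to replay the backward pass.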
class Normalize:
def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
self.mean = mean
self.std = std
def undo(self, imgarr):
proc_img = imgarr.copy()
        proc_img[..., 0] = (self.std[0] * imgarr[..., 0] + self.mean[0]) * 255.0
        proc_img[..., 1] = (self.std[1] * imgarr[..., 1] + self.mean[1]) * 255.0
        proc_img[..., 2] = (self.std[2] * imgarr[..., 2] + self.mean[2]) * 255.0
return proc_img
def __call__(self, img):
imgarr = np.asarray(img)
proc_img = np.empty_like(imgarr, np.float32)
        proc_img[..., 0] = (imgarr[..., 0] / 255.0 - self.mean[0]) / self.std[0]
        proc_img[..., 1] = (imgarr[..., 1] / 255.0 - self.mean[1]) / self.std[1]
        proc_img[..., 2] = (imgarr[..., 2] / 255.0 - self.mean[2]) / self.std[2]
return proc_img
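# A minimal round-trip sketch of Normalize (assumption: `np` is NumPy,
# imported at the top of this file). __call__ standardizes a 0..255 HWC
# image with the ImageNet statistics above, and undo() approximately
# inverts it back to the 0..255 range.
def _normalize_roundtrip_example():
    norm = Normalize()
    img = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
    z = norm(img)            # (img / 255 - mean) / std, per channel
    restored = norm.undo(z)  # (std * z + mean) * 255, per channel
    assert np.allclose(restored, img.astype(np.float32), atol=1e-3)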
class BaseNet(nn.Module):
def __init__(self):
super().__init__()
self.normalize = Normalize()
self.NormLayer = nn.BatchNorm2d
self.not_training = []
self.bn_frozen = []
self.from_scratch_layers = []
    def _init_weights(self, path_to_weights):
        weights_dict = torch.load(path_to_weights)
        self.load_state_dict(weights_dict, strict=False)
def fan_out(self):
raise NotImplementedError
def fixed_layers(self):
return self.not_training
def _fix_running_stats(self, layer, fix_params=False):
if isinstance(layer, self.NormLayer):
self.bn_frozen.append(layer)
if fix_params and layer not in self.not_training:
self.not_training.append(layer)
elif isinstance(layer, list):
for m in layer:
self._fix_running_stats(m, fix_params)
else:
for m in layer.children():
self._fix_running_stats(m, fix_params)
def _fix_params(self, layer):
        if isinstance(layer, (nn.Conv2d, self.NormLayer, nn.Linear)):
self.not_training.append(layer)
if isinstance(layer, self.NormLayer):
self.bn_frozen.append(layer)
elif isinstance(layer, list):
for m in layer:
self._fix_params(m)
elif isinstance(layer, nn.Module):
for m in layer.children():
self._fix_params(m)
def _freeze_bn(self, layer):
if isinstance(layer, self.NormLayer):
layer.eval()
elif isinstance(layer, nn.Module):
for m in layer.children():
self._freeze_bn(m)
def train(self, mode=True):
super().train(mode)
for layer in self.not_training:
if hasattr(layer, 'weight') and layer.weight is not None:
layer.weight.requires_grad = False
if hasattr(layer, 'bias') and layer.bias is not None:
layer.bias.requires_grad = False
for bn_layer in self.bn_frozen:
self._freeze_bn(bn_layer)
def _lr_mult(self):
return 1.0, 2.0, 10.0, 20
def parameter_groups(self, base_lr, wd):
w_old, b_old, w_new, b_new = self._lr_mult()
        groups = (
            {'params': [], 'weight_decay': wd, 'lr': w_old * base_lr},
            {'params': [], 'weight_decay': 0.0, 'lr': b_old * base_lr},
            {'params': [], 'weight_decay': wd, 'lr': w_new * base_lr},
            {'params': [], 'weight_decay': 0.0, 'lr': b_new * base_lr})
fixed_layers = self.fixed_layers()
for m in self.modules():
if m in fixed_layers:
continue
            if isinstance(m, (nn.Conv2d, nn.Linear, self.NormLayer)):
if m.weight is not None:
if m in self.from_scratch_layers:
groups[2]['params'].append(m.weight)
else:
groups[0]['params'].append(m.weight)
if m.bias is not None:
if m in self.from_scratch_layers:
groups[3]['params'].append(m.bias)
else:
groups[1]['params'].append(m.bias)
return groups
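# A hedged usage sketch: the four dicts returned by parameter_groups() are
# already in the format torch.optim expects, so the 1x/2x/10x/20x multipliers
# from _lr_mult() apply to pretrained weights, pretrained biases,
# from-scratch weights and from-scratch biases respectively. `net` is a
# hypothetical BaseNet subclass instance.
def _make_optimizer_example(net, base_lr=1e-3, wd=5e-4):
    return torch.optim.SGD(net.parameter_groups(base_lr, wd),
        lr=base_lr, momentum=0.9)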
class VGG16New(BaseNet):
def __init__(self, fc6_dilation=1):
super(VGG16New, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=2, dilation=2)
        self.fc6 = nn.Conv2d(512, 1024, 3, padding=fc6_dilation,
            dilation=fc6_dilation)
self.drop6 = nn.Dropout2d(p=0.5)
self.fc7 = nn.Conv2d(1024, 1024, 1)
self._fix_params([self.conv1_1, self.conv1_2])
def fan_out(self):
return 1024
def forward_as_dict(self, x):
x = F.relu(self.conv1_1(x), inplace=True)
x = F.relu(self.conv1_2(x), inplace=True)
x = self.pool1(x)
x = F.relu(self.conv2_1(x), inplace=True)
x = F.relu(self.conv2_2(x), inplace=True)
x = self.pool2(x)
x = F.relu(self.conv3_1(x), inplace=True)
x = F.relu(self.conv3_2(x), inplace=True)
x = F.relu(self.conv3_3(x), inplace=True)
conv3 = x
x = self.pool3(x)
x = F.relu(self.conv4_1(x), inplace=True)
x = F.relu(self.conv4_2(x), inplace=True)
x = F.relu(self.conv4_3(x), inplace=True)
x = self.pool4(x)
x = F.relu(self.conv5_1(x), inplace=True)
x = F.relu(self.conv5_2(x), inplace=True)
x = F.relu(self.conv5_3(x), inplace=True)
x = F.relu(self.fc6(x), inplace=True)
x = self.drop6(x)
x = F.relu(self.fc7(x), inplace=True)
conv6 = x
return dict({'conv3': conv3, 'conv6': conv6})
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_1.weight
primals_23 = self.conv5_1.bias
primals_24 = self.conv5_2.weight
primals_25 = self.conv5_2.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_28 = self.fc6.weight
primals_29 = self.fc6.bias
primals_30 = self.fc7.weight
primals_31 = self.fc7.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
| loserbbb/1-stage-wseg | VGG16 | false | 15,992 | ["Apache-2.0"] | 364 | f1579be241986c1e19420bfbf6711b6c2208d99a | https://github.com/loserbbb/1-stage-wseg/tree/f1579be241986c1e19420bfbf6711b6c2208d99a |
CrossPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/54/c54fiiozms64dqfszq2hf52cdztx43kas6yivnlda7p3bxzbtzle.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
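# Hedged reading of the kernel above: for the contiguous (4, 4, 4, 4) input
# validated in call(), each of the 64 output elements is the maximum of 4
# adjacent values along the innermost axis -- an unrolled
# torch.max(x, dim=-1). A sketch of the eager-mode equivalent (not used by
# the compiled path):
def _reference_max_last_dim(x):
    # matches triton_poi_fused_max_0 for a contiguous last dimension
    return torch.max(x, dim=-1).values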
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CrossPooling(nn.Module):
""" Cross pooling """
def forward(self, x):
""" Forward function of CrossPooling module.
Args:
x: a stack of (batch x channel x height x width) tensors on the last axis.
Returns:
A (batch x channel x height x width) tensor after applying max-pooling
over the last axis.
"""
x, _ = torch.max(x, dim=-1)
return x
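# Example usage (a small sketch): stack feature maps on a new trailing axis
# and let CrossPooling reduce them with an elementwise max.
def _cross_pooling_example():
    maps = [torch.rand(4, 4, 4, 4) for _ in range(3)]
    stacked = torch.stack(maps, dim=-1)  # (batch, channel, h, w, 3)
    pooled = CrossPooling()(stacked)     # (batch, channel, h, w)
    assert pooled.shape == (4, 4, 4, 4)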
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class CrossPoolingNew(nn.Module):
""" Cross pooling """
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| manipopopo/C5 | CrossPooling | false | 15,993 | ["Apache-2.0"] | 51 | 154eb38c330e65476ddb77836948a28237f23c88 | https://github.com/manipopopo/C5/tree/154eb38c330e65476ddb77836948a28237f23c88 |
CausalAttentionSortNet | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tw/ctwca7e53sw7tsm2n6nxlsu2flxorxjthpe4map7zstuzobylnv2.py
# Topologically Sorted Source Nodes: [triu_indices], Original ATen: [aten.triu_indices]
# Source node to ATen node mapping:
# triu_indices => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_3, %add_4],), kwargs = {})
triton_poi_fused_triu_indices_0 = async_compile.triton('triton_poi_fused_triu_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_triu_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_triu_indices_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp0.to(tl.float64)
tmp6 = tl.full([1], 2.0, tl.float64)
tmp7 = tmp5 * tmp6
tmp8 = tl.full([1], 2.25, tl.float64)
tmp9 = tmp8 - tmp7
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1], 1.5, tl.float64)
tmp12 = tmp11 - tmp10
tmp13 = libdevice.floor(tmp12)
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 + tmp1
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
tmp19 = tl.full([1], 2, tl.int64)
tmp20 = tmp0 < tmp19
tmp21 = (-1) + x0
tmp22 = tmp21.to(tl.float64)
tmp23 = tmp22 * tmp6
tmp24 = tmp8 - tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp11 - tmp25
tmp27 = libdevice.floor(tmp26)
tmp28 = tl.full([1], 1.0, tl.float64)
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp27
tmp31 = tl.full([1], 0.5, tl.float64)
tmp32 = tmp30 * tmp31
tmp33 = tmp22 - tmp32
tmp34 = libdevice.floor(tmp33)
tmp35 = tmp34.to(tl.int64)
tmp36 = tmp35 + tmp1
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp18, tmp36, tmp37)
tmp39 = tl.where(tmp4, tmp17, tmp38)
tl.store(out_ptr0 + (x0), tmp39, xmask)
''', device_str='cuda')
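# Hedged note: the float64 arithmetic above appears to be the closed form
# behind aten::triu_indices -- the row of the k-th upper-triangular element
# is recovered without a loop (here floor(1.5 - sqrt(2.25 - 2k)) for the
# tiny 1x2, offset-1 case) and the column follows by subtracting the count
# of elements in earlier rows. A small self-check of that reading:
def _triu_closed_form_check():
    import math
    k = 0  # the single element of triu_indices(1, 2, offset=1)
    row = math.floor(1.5 - math.sqrt(2.25 - 2.0 * k))
    col = k - (1 - row) * row // 2 + 1  # +1 mirrors the offset added later
    assert [row, col] == torch.triu_indices(1, 2, offset=1)[:, 0].tolist()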
# kernel path: runs/run_shard_0/inductor_cache/ws/cws2pkumetz45dxmoqens35w7rueu4uvcpffdxy6yictkzqijkrp.py
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# mask => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 2], False), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], False, tl.int1)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
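# Hedged note: this kernel only materializes the all-False (4, 1, 2) boolean
# mask buffer; the single True entry per batch element is scattered in by
# the index_put kernel that follows.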
# kernel path: runs/run_shard_0/inductor_cache/dh/cdhqzh26rvizxiomi5dcslhwesa5ec2smf6imvsywub7g674fl45.py
# Topologically Sorted Source Nodes: [mask, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# mask => full_default
# setitem => full_default_1, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 2], False), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], True), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default, [None, %select_1, %add_5], %full_default_1), kwargs = {})
triton_poi_fused__to_copy_index_put_lift_fresh_2 = async_compile.triton('triton_poi_fused__to_copy_index_put_lift_fresh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_index_put_lift_fresh_2', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_index_put_lift_fresh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp2 = tl.full([XBLOCK], 1, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 1), "index out of bounds: 0 <= tmp5 < 1")
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([XBLOCK], 2, tl.int32)
tmp12 = tmp10 + tmp11
tmp13 = tmp10 < 0
tmp14 = tl.where(tmp13, tmp12, tmp10)
tl.device_assert((0 <= tmp14) & (tmp14 < 2), "index out of bounds: 0 <= tmp14 < 2")
tmp16 = tl.full([1], True, tl.int1)
tl.store(out_ptr0 + (tmp14 + (2*x0)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/x3/cx3jkjox5glhioo55hq4lxn7c2bxfpht5rbjqj7ibjcb3e3cvye5.py
# Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# Graph fragment:
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%arg0_1, 1), kwargs = {})
triton_per_fused_cumsum_3 = async_compile.triton('triton_per_fused_cumsum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[16, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (16*x1)), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + (4*r2) + (16*x1)), tmp3, xmask)
''', device_str='cuda')
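# The associative_scan above is the parallel form of the cumulative sum it
# was lowered from; a hedged eager-mode equivalent over the same axis:
def _reference_cumsum(x):
    # torch.cumsum along dim 1, matching the %cumsum node in the graph, with
    # the same float32 cast the kernel applies before scanning
    return torch.cumsum(x.float(), dim=1)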
# kernel path: runs/run_shard_0/inductor_cache/2d/c2dobnmuco5z6u5eifqvap5ynacsgwyrtxve2d5x6oc4av5khvvv.py
# Topologically Sorted Source Nodes: [sk, sk_1], Original ATen: [aten.sum, aten.constant_pad_nd]
# Source node to ATen node mapping:
# sk => sum_1
# sk_1 => constant_pad_nd
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [2]), kwargs = {})
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%sum_1, [0, 0, 1, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_sum_4 = async_compile.triton('triton_poi_fused_constant_pad_nd_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 2
x0 = xindex % 4
x2 = (xindex // 8)
x3 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = 1.0
tmp5 = tmp3 / tmp4
tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = 2.0
tmp8 = tmp6 / tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 3.0
tmp12 = tmp10 / tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp13 + tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp2, tmp17, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
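# The kernel above fuses three source ops into one pass: the divisions by
# 1.0 .. 4.0 finish the running average (cumavg) at each of the four bucket
# positions, the four terms are summed (k_r.sum(dim=2)), and the tmp2 guard on
# -1 + x1 >= 0 realizes F.pad(sk, (0, 0, topk, 0)) by writing zeros into the
# prepended row. A rough reference, assuming topk == 1 and seq_len == 4
# (illustrative helper, not emitted by the compiler):
def _ref_sk(k):
    import torch.nn.functional as F
    r = torch.arange(1, 5, device=k.device, dtype=k.dtype)
    sk = (k.cumsum(dim=1) / r[None, :, None]).sum(dim=1, keepdim=True)
    return F.pad(sk, (0, 0, 1, 0))  # (4, 1, 4) -> (4, 2, 4), matching buf5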
# kernel path: runs/run_shard_0/inductor_cache/54/c54a43jsugmbxg43yp3t4eg543qkdatjeumgr32cnqcjluns5zoa.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum, %unsqueeze_1), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = 1 + x1
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 / tmp2
tl.store(in_out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
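# triton_poi_fused_div_5 divides the cumulative sums in place by (1 + row
# index), completing cumavg for the query path. Worked example:
# cumsum([1, 2, 3]) = [1, 3, 6]; elementwise division by [1, 2, 3] yields the
# running averages [1.0, 1.5, 2.0].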
# kernel path: runs/run_shard_0/inductor_cache/ax/caxqacj43536crlu7tgd4z6ofnibkbo7bpidzoxj46jos3qkpie5.py
# Topologically Sorted Source Nodes: [masked_fill_, R, softmax], Original ATen: [aten.masked_fill, aten.mul, aten._softmax]
# Source node to ATen node mapping:
# R => mul_2
# masked_fill_ => full_default_2, where
# softmax => exp
# Graph fragment:
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.4028234663852886e+38), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%index_put, %full_default_2, %mul_2), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_masked_fill_mul_6 = async_compile.triton('triton_poi_fused__softmax_masked_fill_mul_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_mul_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_mul_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2*x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp14 = tl.load(in_ptr1 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -3.4028234663852886e+38
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp10 = tmp9 * tmp2
tmp11 = tl.where(tmp8, tmp4, tmp10)
tmp12 = tmp11 * tmp6
tmp15 = tmp14 * tmp2
tmp16 = tl.where(tmp13, tmp4, tmp15)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
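# The kernel above builds the softmax numerator in numerically stable form:
# masked positions become -FLT_MAX, the logits carry the dim ** -0.5 == 0.5
# scale from the einsum, the row max is subtracted, and the division by 4
# (tmp20 == 0.25) is the temperature passed to differentiable_topk. In eager
# terms, roughly: exp((masked_R - masked_R.amax(-1, keepdim=True)) / 4).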
# kernel path: runs/run_shard_0/inductor_cache/rd/crdmbxpwwhidmhg5srk2wtcyopyahyjcf3khxkzxgxdmtyngvzf6.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_4, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_7 = async_compile.triton('triton_poi_fused__softmax_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
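# Second softmax stage: each of the two numerators per row is divided by their
# sum, i.e. exp_x / exp_x.sum(-1, keepdim=True).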
# kernel path: runs/run_shard_0/inductor_cache/jr/cjrpc3g6c5ygq26xe7exkern2zx7lnfnkrzepvcmscgrq2gj2k25.py
# Topologically Sorted Source Nodes: [zeros_like, topks], Original ATen: [aten.zeros_like, aten.scatter]
# Source node to ATen node mapping:
# topks => scatter
# zeros_like => full_default_3
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 2], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %scatter : [num_users=1] = call_function[target=torch.ops.aten.scatter.src](args = (%full_default_3, -1, %getitem_1, %getitem), kwargs = {})
triton_poi_fused_scatter_zeros_like_8 = async_compile.triton('triton_poi_fused_scatter_zeros_like_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_zeros_like_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_scatter_zeros_like_8(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rq/crq5assai74fbipyt6hkpepbzsztmanglvk6rai6i5x4tup55m5e.py
# Topologically Sorted Source Nodes: [zeros_like, topks], Original ATen: [aten.zeros_like, aten.scatter]
# Source node to ATen node mapping:
# topks => scatter
# zeros_like => full_default_3
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 2], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %scatter : [num_users=1] = call_function[target=torch.ops.aten.scatter.src](args = (%full_default_3, -1, %getitem_1, %getitem), kwargs = {})
triton_poi_fused_scatter_zeros_like_9 = async_compile.triton('triton_poi_fused_scatter_zeros_like_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_zeros_like_9', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_scatter_zeros_like_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tl.device_assert(((0 <= tmp0) & (tmp0 < 2)) | ~(xmask), "index out of bounds: 0 <= tmp0 < 2")
tl.store(out_ptr0 + (tmp0 + (2*x0)), tmp2, xmask)
''', device_str='cuda')
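# Together, the two scatter kernels implement
# torch.zeros_like(x).scatter_(-1, indices, values) for the top-1 case: kernel
# 8 zero-fills the output, kernel 9 writes each row's winning probability at
# its index; the device_assert mirrors the bounds check scatter_ performs.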
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [triu_indices], Original ATen: [aten.triu_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_triu_indices_0.run(buf0, 2, grid=grid(2), stream=stream0)
buf1 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_1.run(buf1, 8, grid=grid(8), stream=stream0)
# Topologically Sorted Source Nodes: [mask, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.index_put]
triton_poi_fused__to_copy_index_put_lift_fresh_2.run(buf0, buf1, 4, grid=grid(4), stream=stream0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum]
triton_per_fused_cumsum_3.run(arg0_1, buf3, 16, 4, grid=grid(16), stream=stream0)
del arg0_1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cumsum_1], Original ATen: [aten.cumsum]
triton_per_fused_cumsum_3.run(arg1_1, buf4, 16, 4, grid=grid(16), stream=stream0)
del arg1_1
buf5 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sk, sk_1], Original ATen: [aten.sum, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_sum_4.run(buf4, buf5, 32, grid=grid(32), stream=stream0)
del buf4
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (16, 0, 1), 0), reinterpret_tensor(buf5, (4, 4, 2), (8, 1, 4), 0), out=buf7)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 1, 2), (2, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [masked_fill_, R, softmax], Original ATen: [aten.masked_fill, aten.mul, aten._softmax]
triton_poi_fused__softmax_masked_fill_mul_6.run(buf1, buf7, buf8, 8, grid=grid(8), stream=stream0)
del buf1
buf9 = reinterpret_tensor(buf7, (4, 1, 2), (2, 8, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf8, buf9, 8, grid=grid(8), stream=stream0)
del buf8
# Topologically Sorted Source Nodes: [softmax, topk], Original ATen: [aten._softmax, aten.topk]
buf10 = torch.ops.aten.topk.default(buf9, 1)
buf11 = buf10[0]
buf12 = buf10[1]
del buf10
buf13 = reinterpret_tensor(buf9, (4, 1, 2), (2, 2, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [zeros_like, topks], Original ATen: [aten.zeros_like, aten.scatter]
triton_poi_fused_scatter_zeros_like_8.run(buf13, 8, grid=grid(8), stream=stream0)
# Topologically Sorted Source Nodes: [zeros_like, topks], Original ATen: [aten.zeros_like, aten.scatter]
triton_poi_fused_scatter_zeros_like_9.run(buf12, buf11, buf13, 4, grid=grid(4), stream=stream0)
del buf11
del buf12
return (buf13, )
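# Rough mapping from buffers back to the source module (for this trace only):
# buf3/buf4 hold the cumsums of q and k, buf6 the query running average
# (reinterpreted as sq = q_r[:, :, 0] for the bmm), buf5 the padded bucket
# sums sk, buf7 the einsum result R, buf8/buf9 the masked softmax, and buf13
# the scattered top-1 matrix that forward returns.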
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
from torch import nn
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim + 1] = [buckets, -1]
return t.reshape(*shape)
def differentiable_topk(x, k, temperature=1.0):
*_, n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == k - 1
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x.scatter_(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(*_, k * n, dim)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cumavg(t, dim):
r = torch.arange(1, t.shape[dim] + 1, device=t.device, dtype=t.dtype)
expand_slice = [None] * len(t.shape)
expand_slice[dim] = slice(None, None)
return t.cumsum(dim=dim) / r[tuple(expand_slice)]
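# cumavg returns the running mean along `dim`. Tiny illustrative check (helper
# name is ours; nothing in this module calls it):
def _cumavg_example():
    t = torch.tensor([[1.0, 2.0, 3.0]])
    return cumavg(t, dim=1)  # expected: tensor([[1.0, 1.5, 2.0]])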
def mask_reordering_matrix(R, topk, temperature):
buckets = R.shape[1]
mask_value = max_neg_value(R)
mask = torch.zeros(R.shape, device=R.device).bool()
i, j = torch.triu_indices(buckets, buckets)
mask[:, i, j + topk] = True
R.masked_fill_(mask, mask_value)
return differentiable_topk(R, topk, temperature)
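# mask_reordering_matrix enforces causality before the soft top-k. With
# buckets == 1 and topk == 1 (the shapes in this trace), torch.triu_indices(1,
# 1) yields i = [0], j = [0], so only column j + topk == 1 of R is filled with
# -FLT_MAX and the softmax cannot select the future bucket.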
class CausalAttentionSortNet(nn.Module):
def __init__(self, heads, bucket_size, dim, temperature):
super().__init__()
self.heads = heads
self.bucket_size = bucket_size
self.dim = dim
self.temperature = temperature
def forward(self, q, k, topk=1):
bh, *_, h, dim = *q.shape, self.heads, self.dim
        bh // h  # no-op: computes the batch size (bh // heads) but discards the result; kept as in the source repo
buckets = q.shape[1] // self.bucket_size
kv_buckets = k.shape[1] // self.bucket_size
q_r = bucket(buckets, cumavg(q, dim=1))
k_r = bucket(kv_buckets, cumavg(k, dim=1))
sq = q_r[:, :, 0]
sk = k_r.sum(dim=2)
sk = F.pad(sk, (0, 0, topk, 0))
R = torch.einsum('bie,bje->bij', sq, sk) * dim ** -0.5
return mask_reordering_matrix(R, topk, self.temperature)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'bucket_size': 4, 'dim': 4, 'temperature': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_triu_indices_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp0.to(tl.float64)
tmp6 = tl.full([1], 2.0, tl.float64)
tmp7 = tmp5 * tmp6
tmp8 = tl.full([1], 2.25, tl.float64)
tmp9 = tmp8 - tmp7
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1], 1.5, tl.float64)
tmp12 = tmp11 - tmp10
tmp13 = libdevice.floor(tmp12)
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 + tmp1
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)  # result unused; artifact of the Inductor code generator
tmp21 = -1 + x0
tmp22 = tmp21.to(tl.float64)
tmp23 = tmp22 * tmp6
tmp24 = tmp8 - tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp11 - tmp25
tmp27 = libdevice.floor(tmp26)
tmp28 = tl.full([1], 1.0, tl.float64)
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp27
tmp31 = tl.full([1], 0.5, tl.float64)
tmp32 = tmp30 * tmp31
tmp33 = tmp22 - tmp32
tmp34 = libdevice.floor(tmp33)
tmp35 = tmp34.to(tl.int64)
tmp36 = tmp35 + tmp1
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp18, tmp36, tmp37)
tmp39 = tl.where(tmp4, tmp17, tmp38)
tl.store(out_ptr0 + x0, tmp39, xmask)
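# The kernel above evaluates torch.triu_indices(1, 1) in closed form: the row
# is recovered via the triangular-number inversion floor(1.5 - sqrt(2.25 - 2k))
# and the column from the remainder, which for buckets == 1 reduces to the
# single pair i = 0, j = 0.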
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], False, tl.int1)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_index_put_lift_fresh_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp7 = tl.load(in_ptr0 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp2 = tl.full([XBLOCK], 1, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 1),
'index out of bounds: 0 <= tmp5 < 1')
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([XBLOCK], 2, tl.int32)
tmp12 = tmp10 + tmp11
tmp13 = tmp10 < 0
tmp14 = tl.where(tmp13, tmp12, tmp10)
tl.device_assert((0 <= tmp14) & (tmp14 < 2),
'index out of bounds: 0 <= tmp14 < 2')
tmp16 = tl.full([1], True, tl.int1)
tl.store(out_ptr0 + (tmp14 + 2 * x0), tmp16, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 16 * x1), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 4 * r2 + 16 * x1), tmp3, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_sum_4(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 2
x0 = xindex % 4
x2 = xindex // 8
x3 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp2 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = 1.0
tmp5 = tmp3 / tmp4
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = 2.0
tmp8 = tmp6 / tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = 3.0
tmp12 = tmp10 / tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp13 + tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp2, tmp17, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_div_5(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = 1 + x1
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 / tmp2
tl.store(in_out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_mul_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp8 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + 2 * x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp14 = tl.load(in_ptr1 + (1 + 2 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -3.4028234663852886e+38
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp10 = tmp9 * tmp2
tmp11 = tl.where(tmp8, tmp4, tmp10)
tmp12 = tmp11 * tmp6
tmp15 = tmp14 * tmp2
tmp16 = tl.where(tmp13, tmp4, tmp15)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_scatter_zeros_like_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_scatter_zeros_like_9(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tl.device_assert((0 <= tmp0) & (tmp0 < 2) | ~xmask,
'index out of bounds: 0 <= tmp0 < 2')
tl.store(out_ptr0 + (tmp0 + 2 * x0), tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_triu_indices_0[grid(2)](buf0, 2, XBLOCK=2,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.bool)
triton_poi_fused__to_copy_1[grid(8)](buf1, 8, XBLOCK=8, num_warps=1,
num_stages=1)
triton_poi_fused__to_copy_index_put_lift_fresh_2[grid(4)](buf0,
buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
del buf0
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_cumsum_3[grid(16)](arg0_1, buf3, 16, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_cumsum_3[grid(16)](arg1_1, buf4, 16, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del arg1_1
buf5 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_sum_4[grid(32)](buf4, buf5, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del buf4
buf6 = buf3
del buf3
triton_poi_fused_div_5[grid(64)](buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 1, 2), (2, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (16, 0, 1),
0), reinterpret_tensor(buf5, (4, 4, 2), (8, 1, 4), 0), out=buf7)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 1, 2), (2, 8, 1), torch.float32)
triton_poi_fused__softmax_masked_fill_mul_6[grid(8)](buf1, buf7,
buf8, 8, XBLOCK=8, num_warps=1, num_stages=1)
del buf1
buf9 = reinterpret_tensor(buf7, (4, 1, 2), (2, 8, 1), 0)
del buf7
triton_poi_fused__softmax_7[grid(8)](buf8, buf9, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del buf8
buf10 = torch.ops.aten.topk.default(buf9, 1)
buf11 = buf10[0]
buf12 = buf10[1]
del buf10
buf13 = reinterpret_tensor(buf9, (4, 1, 2), (2, 2, 1), 0)
del buf9
triton_poi_fused_scatter_zeros_like_8[grid(8)](buf13, 8, XBLOCK=8,
num_warps=1, num_stages=1)
triton_poi_fused_scatter_zeros_like_9[grid(4)](buf12, buf11, buf13,
4, XBLOCK=4, num_warps=1, num_stages=1)
del buf11
del buf12
return buf13,
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim + 1] = [buckets, -1]
return t.reshape(*shape)
def differentiable_topk(x, k, temperature=1.0):
*_, n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == k - 1
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x.scatter_(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(*_, k * n, dim)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cumavg(t, dim):
r = torch.arange(1, t.shape[dim] + 1, device=t.device, dtype=t.dtype)
expand_slice = [None] * len(t.shape)
expand_slice[dim] = slice(None, None)
return t.cumsum(dim=dim) / r[tuple(expand_slice)]
def mask_reordering_matrix(R, topk, temperature):
buckets = R.shape[1]
mask_value = max_neg_value(R)
mask = torch.zeros(R.shape, device=R.device).bool()
i, j = torch.triu_indices(buckets, buckets)
mask[:, i, j + topk] = True
R.masked_fill_(mask, mask_value)
return differentiable_topk(R, topk, temperature)
class CausalAttentionSortNetNew(nn.Module):
def __init__(self, heads, bucket_size, dim, temperature):
super().__init__()
self.heads = heads
self.bucket_size = bucket_size
self.dim = dim
self.temperature = temperature
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lucidrains/sinkhorn-transformer | CausalAttentionSortNet | false | 15,994 | [
"MIT"
] | 216 | 531bdbe46dfc2abd20183dbcede669bc9df567c6 | https://github.com/lucidrains/sinkhorn-transformer/tree/531bdbe46dfc2abd20183dbcede669bc9df567c6 |
SE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tu/ctuej2j6f3oxr5p43q7juhagc3r3ncgs2ikvxemutunlnxlnvl24.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2, -3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
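# Persistent-reduction kernel for the adaptive average pool: each of the 4
# programs sums its 64 elements and divides by 64.0, matching
# x.mean([-1, -2, -3], keepdim=True) on the (4, 4, 4, 4) input.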
# kernel path: runs/run_shard_0/inductor_cache/cv/ccvicmhiupo7cb3dwu3rzvk4zvi24nzhtwz7a5c4ke3qpmz3ofpe.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
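# Fuses the fc1 bias add with ReLU and, as a by-product, stores the boolean
# mask (activation <= 0) that autograd's threshold_backward will consume; the
# mask is returned from call() as buf7.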
# kernel path: runs/run_shard_0/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/72/c72kehajbo6zfhkuwjl3g6t24haqfzxumia5abs5c2hzebjb6ubo.py
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x_4 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
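# Final SE gating: x1 = xindex // 64 maps each of the 256 input elements to
# its pooled gate, so the store realizes x_in * sigmoid(fc2_out) with one gate
# broadcast over all spatial positions of its channel.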
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 1, 1, 1), (16, 1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (16, 1, 1, 1), (1, 16, 16, 16), 0); del buf2 # reuse
buf7 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_3, buf7, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 4, grid=grid(4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 1, 1, 1), (16, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]  # fix: ctx.saved_variables is deprecated; saved_tensors is the supported accessor
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
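# Sanity check for the hand-written backward: with s = sigmoid(x),
# d/dx [x * s] = s + x * s * (1 - s) = s * (1 + x * (1 - s)),
# which is exactly the factor multiplied into grad_output above.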
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
divisor (int): the new width should be dividable by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
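    # Worked example for the shapes in this trace: _round_width(4, 4) gives
    # width = 16, width_out = max(8, int(16 + 8 / 2) // 8 * 8) = 16, and
    # 16 >= 0.9 * 16, so dim_fc == 16 -- matching the (16, 4, 1, 1, 1) fc1
    # weight asserted in call().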
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
divisor (int): the new width should be dividable by divisor.
"""
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, x):
x_in = x
for module in self.children():
x = module(x)
return x_in * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'ratio': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1, 1, 1), (16, 1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (16, 1, 1, 1), (1, 16, 16, 16), 0)
del buf2
buf7 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3,
primals_3, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 16,
1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1,
16, 1, 1, 1), (16, 1, 1, 1, 1), 0), buf5, buf7
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]  # fix: ctx.saved_variables is deprecated; saved_tensors is the supported accessor
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
class SENew(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
divisor (int): the new width should be dividable by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor *
divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
divisor (int): the new width should be dividable by divisor.
"""
super(SENew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| makarandtapaswi/SlowFast | SE | false | 15,995 | [
"Apache-2.0"
] | 4,914 | 39ef35c9a086443209b458cceaec86a02e27b369 | https://github.com/makarandtapaswi/SlowFast/tree/39ef35c9a086443209b458cceaec86a02e27b369 |
SEModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# outputs => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py
# Topologically Sorted Source Nodes: [outputs_1, outputs_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# outputs_1 => convolution
# outputs_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gy/cgy2kl53lwau2exmfkzj3vayf574d42r22vj2zsyljclxadypgqh.py
# Topologically Sorted Source Nodes: [outputs_3, mul, add, relu6, outputs_4, outputs_5], Original ATen: [aten.convolution, aten.mul, aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# outputs_3 => convolution_1
# outputs_4 => div
# outputs_5 => mul_1
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.2), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.2
tmp5 = tmp3 * tmp4
tmp6 = 3.0
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 6.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp12 = 0.16666666666666666
tmp13 = tmp11 * tmp12
tmp14 = tmp0 * tmp13
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
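# The kernel above inlines the hard-sigmoid gate from the eager Hsigmoid
# defined further down: tmp12 = 0.16666... is 1/6, so tmp14 computes
# x * relu6(1.2 * conv(x) + 3) / 6 in one fused pass. For a zero
# pre-activation the gate is 3/6 = 0.5.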
# kernel path: runs/run_shard_0/inductor_cache/td/ctdc6mz6245p7clw2oaccdlvba3ey2aijlkmsav4pkyt5aqy23q4.py
# Topologically Sorted Source Nodes: [outputs_3, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# mul => mul
# outputs_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.2), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_mul_3 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.2
tmp4 = tmp2 * tmp3
tmp5 = 3.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tmp9 = 6.0
tmp10 = tmp6 >= tmp9
tmp11 = tmp8 | tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [outputs_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [outputs_1, outputs_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [outputs_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs_3, mul, add, relu6, outputs_4, outputs_5], Original ATen: [aten.convolution, aten.mul, aten.add, aten.hardtanh, aten.div]
triton_poi_fused_add_convolution_div_hardtanh_mul_2.run(primals_1, buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [outputs_3, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_mul_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0
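# A quick hand-computed check of the hard sigmoid above:
#     x = -3.0 -> relu6(1.2 * -3.0 + 3.0) / 6.0 = relu6(-0.6) / 6.0 = 0.0
#     x =  0.0 -> relu6(3.0) / 6.0 = 0.5
#     x =  3.0 -> relu6(6.6) / 6.0 = 6.0 / 6.0 = 1.0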
class Activation(nn.Module):
def __init__(self, act_type, inplace=True):
super(Activation, self).__init__()
act_type = act_type.lower()
if act_type == 'relu':
self.act = nn.ReLU(inplace=inplace)
elif act_type == 'relu6':
self.act = nn.ReLU6(inplace=inplace)
elif act_type == 'sigmoid':
raise NotImplementedError
elif act_type == 'hard_sigmoid':
self.act = Hsigmoid(inplace)
elif act_type == 'hard_swish':
self.act = Hswish(inplace=inplace)
elif act_type == 'leakyrelu':
self.act = nn.LeakyReLU(inplace=inplace)
else:
raise NotImplementedError
def forward(self, inputs):
return self.act(inputs)
class SEModule(nn.Module):
def __init__(self, in_channels, reduction=4, name=''):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
in_channels // reduction, kernel_size=1, stride=1, padding=0,
bias=True)
self.relu1 = Activation(act_type='relu', inplace=True)
self.conv2 = nn.Conv2d(in_channels=in_channels // reduction,
out_channels=in_channels, kernel_size=1, stride=1, padding=0,
bias=True)
self.hard_sigmoid = Activation(act_type='hard_sigmoid', inplace=True)
def forward(self, inputs):
outputs = self.avg_pool(inputs)
outputs = self.conv1(outputs)
outputs = self.relu1(outputs)
outputs = self.conv2(outputs)
outputs = self.hard_sigmoid(outputs)
outputs = inputs * outputs
return outputs
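# A minimal usage sketch (shapes chosen to match get_inputs below):
#     se = SEModule(in_channels=4)
#     out = se(torch.rand(4, 4, 4, 4))  # gated copy of the input, same shape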
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
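# The kernel above is AdaptiveAvgPool2d(1) specialized to this trace: each of
# the 16 (batch, channel) pairs reduces its 4 x 4 = 16 spatial values with
# tl.sum and divides by 16.0, e.g. an all-ones plane averages to exactly 1.0.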
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.2
tmp5 = tmp3 * tmp4
tmp6 = 3.0
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 6.0
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp12 = 0.16666666666666666
tmp13 = tmp11 * tmp12
tmp14 = tmp0 * tmp13
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_mul_3(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.2
tmp4 = tmp2 * tmp3
tmp5 = 3.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tmp9 = 6.0
tmp10 = tmp6 >= tmp9
tmp11 = tmp8 | tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
        triton_poi_fused_add_convolution_hardtanh_backward_mul_3[grid(16)](
            buf4, primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0
class Activation(nn.Module):
def __init__(self, act_type, inplace=True):
super(Activation, self).__init__()
act_type = act_type.lower()
if act_type == 'relu':
self.act = nn.ReLU(inplace=inplace)
elif act_type == 'relu6':
self.act = nn.ReLU6(inplace=inplace)
elif act_type == 'sigmoid':
raise NotImplementedError
elif act_type == 'hard_sigmoid':
self.act = Hsigmoid(inplace)
elif act_type == 'hard_swish':
self.act = Hswish(inplace=inplace)
elif act_type == 'leakyrelu':
self.act = nn.LeakyReLU(inplace=inplace)
else:
raise NotImplementedError
def forward(self, inputs):
return self.act(inputs)
class SEModuleNew(nn.Module):
def __init__(self, in_channels, reduction=4, name=''):
super(SEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
in_channels // reduction, kernel_size=1, stride=1, padding=0,
bias=True)
self.relu1 = Activation(act_type='relu', inplace=True)
self.conv2 = nn.Conv2d(in_channels=in_channels // reduction,
out_channels=in_channels, kernel_size=1, stride=1, padding=0,
bias=True)
self.hard_sigmoid = Activation(act_type='hard_sigmoid', inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| manjrekarom/PaddleOCR2Pytorch | SEModule | false | 15,996 | [
"Apache-2.0"
] | 364 | 6d98508f4c85b9dd3bf022924b0ecc5354ec8281 | https://github.com/manjrekarom/PaddleOCR2Pytorch/tree/6d98508f4c85b9dd3bf022924b0ecc5354ec8281 |
RGBBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qn/cqnbkftode2hvarycthm7bxacpciqlbsp4p4cvx6a5tdl7j5vair.py
# Topologically Sorted Source Nodes: [add, weights], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# weights => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 12
x0 = xindex % 4
x2 = (xindex // 12)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f7/cf73pggcer4jxxnhdhgfke4kylkxhbikngtjw54ezv5h4guxeidb.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_4 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_3, torch.int64), kwargs = {})
triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7j/c7jycyq644ggynst23klcavbh7pgjlmhdje7mmnxmblckhihehbp.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_4 => add_3, clamp_max
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_3, 3), kwargs = {})
triton_poi_fused_add_clamp_2 = async_compile.triton('triton_poi_fused_add_clamp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f2/cf2eaf34xy4hwkwpj455spstljzp6g25wzwkehr426mrz5o6d6zy.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_4 => add_2, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_1, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
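# Worked example of the align_corners=False mapping computed above for this
# 4 -> 8 upsample: output index 3 gives (3 + 0.5) * 0.5 - 0.5 = 1.25, so the
# low tap is floor(1.25) = 1 and the interpolation weight is the clamped
# fractional part 0.25; output index 0 maps to -0.25, which clamps to tap 0.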
# kernel path: runs/run_shard_0/inductor_cache/6i/c6iwh34jhd6rdfehw645jp6d3vvqwn6thaaj7yjpst25mrvgafl4.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.add, aten._unsafe_index, aten.sub, aten.mul]
# Source node to ATen node mapping:
# x_3 => add_1
# x_4 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_6, add_7, add_8, mul_3, mul_4, mul_5, sub_3, sub_4, sub_6
# Graph fragment:
# %add_1 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_6), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_4), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_7, %add_6), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_5), kwargs = {})
triton_poi_fused__unsafe_index_add_mul_sub_4 = async_compile.triton('triton_poi_fused__unsafe_index_add_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_mul_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (tmp8 + (4*tmp4)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr3 + (tmp15 + (4*tmp4)), xmask, eviction_policy='evict_last')
tmp18 = tmp16 + tmp17
tmp19 = tmp18 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp11 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + (4*tmp26) + (16*x2)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (tmp8 + (4*tmp26)), xmask, eviction_policy='evict_last')
tmp29 = tmp27 + tmp28
tmp30 = tl.load(in_ptr2 + (tmp15 + (4*tmp26) + (16*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (tmp15 + (4*tmp26)), xmask, eviction_policy='evict_last')
tmp32 = tmp30 + tmp31
tmp33 = tmp32 - tmp29
tmp34 = tmp33 * tmp20
tmp35 = tmp29 + tmp34
tmp36 = tmp35 - tmp22
tmp38 = tmp36 * tmp37
tmp39 = tmp22 + tmp38
tl.store(in_out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [style], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, weights], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_5, buf0, buf1, 48, grid=grid(48), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf2, (1, 12, 4, 4), (192, 16, 4, 1))
buf3 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_1.run(buf3, 8, grid=grid(8), stream=stream0)
buf4 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf4, 8, grid=grid(8), stream=stream0)
buf5 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_1.run(buf5, 8, grid=grid(8), stream=stream0)
buf6 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf6, 8, grid=grid(8), stream=stream0)
buf7 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf7, 8, grid=grid(8), stream=stream0)
buf9 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf9, 8, grid=grid(8), stream=stream0)
buf10 = empty_strided_cuda((4, 3, 8, 8), (192, 64, 8, 1), torch.float32)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.add, aten._unsafe_index, aten.sub, aten.mul]
triton_poi_fused__unsafe_index_add_mul_sub_4.run(buf11, buf3, buf5, buf2, primals_6, buf6, buf7, buf4, buf9, 768, grid=grid(768), stream=stream0)
del buf2
del primals_6
return (buf11, primals_3, primals_5, buf0, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf4, buf5, buf6, buf7, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Conv2DMod(nn.Module):
def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1,
dilation=1, **kwargs):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel,
kernel)))
nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in',
nonlinearity='leaky_relu')
def _get_same_padding(self, size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
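    # Worked example: the 1x1 kernel used by RGBBlock gives padding
    # ((h - 1) * 0 + 1 * 0) // 2 = 0, while a 3x3 kernel at stride 1 and
    # dilation 1 would give (1 * 2) // 2 = 1, i.e. classic "same" padding.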
def forward(self, x, y):
b, _c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
            d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) +
                1e-08)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation,
self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
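# The reshape + grouped conv above applies a different style-modulated filter
# bank to every batch element in a single conv2d call. A rough equivalence
# sketch, assuming demod=False so demodulation is skipped:
#     mod = Conv2DMod(4, 3, 1, demod=False)
#     x, y = torch.rand(2, 4, 8, 8), torch.rand(2, 4)
#     ref = torch.stack([F.conv2d(x[i:i + 1],
#         mod.weight * (y[i] + 1)[None, :, None, None])[0] for i in range(2)])
#     assert torch.allclose(mod(x, y), ref, atol=1e-6)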
class RGBBlock(nn.Module):
def __init__(self, latent_dim, input_channel, upsample, rgba=False):
super().__init__()
self.input_channel = input_channel
self.to_style = nn.Linear(latent_dim, input_channel)
out_filters = 3 if not rgba else 4
self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=False) if upsample else None
def forward(self, x, prev_rgb, istyle):
style = self.to_style(istyle)
x = self.conv(x, style)
if prev_rgb is not None:
x = x + prev_rgb
if self.upsample is not None:
x = self.upsample(x)
return x
def forward_(self, x, prev_rgb, style):
x = self.conv(x, style)
if prev_rgb is not None:
x = x + prev_rgb
if self.upsample is not None:
x = self.upsample(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'latent_dim': 4, 'input_channel': 4, 'upsample': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 12
x0 = xindex % 4
x2 = xindex // 12
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4), xmask, eviction_policy=
'evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr3 + (tmp15 + 4 * tmp4), xmask, eviction_policy=
'evict_last')
tmp18 = tmp16 + tmp17
tmp19 = tmp18 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp11 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + 4 * tmp26 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (tmp8 + 4 * tmp26), xmask, eviction_policy=
'evict_last')
tmp29 = tmp27 + tmp28
tmp30 = tl.load(in_ptr2 + (tmp15 + 4 * tmp26 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (tmp15 + 4 * tmp26), xmask, eviction_policy=
'evict_last')
tmp32 = tmp30 + tmp31
tmp33 = tmp32 - tmp29
tmp34 = tmp33 * tmp20
tmp35 = tmp29 + tmp34
tmp36 = tmp35 - tmp22
tmp38 = tmp36 * tmp37
tmp39 = tmp22 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
        buf1 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(48)](primals_5, buf0, buf1, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (12, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf2, (1, 12, 4, 4), (192, 16, 4, 1))
buf3 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_1[grid(8)](buf3, 8, XBLOCK=8, num_warps=1,
num_stages=1)
buf4 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_2[grid(8)](buf4, 8, XBLOCK=8,
            num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_1[grid(8)](buf5, 8, XBLOCK=8, num_warps=1,
num_stages=1)
buf6 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused_add_clamp_2[grid(8)](buf6, 8, XBLOCK=8, num_warps=
1, num_stages=1)
buf7 = empty_strided_cuda((8,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(8)](buf7,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(8)](buf9,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 3, 8, 8), (192, 64, 8, 1), torch.float32
)
buf11 = buf10
del buf10
triton_poi_fused__unsafe_index_add_mul_sub_4[grid(768)](buf11, buf3,
buf5, buf2, primals_6, buf6, buf7, buf4, buf9, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del primals_6
return buf11, primals_3, primals_5, buf0, reinterpret_tensor(primals_4,
(1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (12, 4,
1, 1), (4, 1, 1, 1), 0), buf3, buf4, buf5, buf6, buf7, buf9
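# Hedged fix-up (assumption, not part of the original dump): Conv2DMod.forward
# below references EPS, which is not defined anywhere visible in this
# generated module. HistoGAN's source uses EPS = 1e-8, so provide that value
# only if it is actually missing.
try:
    EPS
except NameError:
    EPS = 1e-08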
class Conv2DMod(nn.Module):
def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1,
dilation=1, **kwargs):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel,
kernel)))
nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in',
nonlinearity='leaky_relu')
def _get_same_padding(self, size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
def forward(self, x, y):
b, _c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) +
EPS)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation,
self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
class RGBBlockNew(nn.Module):
def __init__(self, latent_dim, input_channel, upsample, rgba=False):
super().__init__()
self.input_channel = input_channel
self.to_style = nn.Linear(latent_dim, input_channel)
out_filters = 3 if not rgba else 4
self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=False) if upsample else None
def forward_(self, x, prev_rgb, style):
x = self.conv(x, style)
if prev_rgb is not None:
x = x + prev_rgb
if self.upsample is not None:
x = self.upsample(x)
return x
def forward(self, input_0, input_1, input_2):
primals_1 = self.to_style.weight
primals_2 = self.to_style.bias
primals_5 = self.conv.weight
primals_4 = input_0
primals_3 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
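# Illustrative usage sketch (an editorial addition; assumes a CUDA device,
# since call() runs the fused CUDA kernels). The shape asserts in call() fix
# the calling convention: input_0 is the (4, 4, 4, 4) feature map, input_1
# the (4, 4) style latent, and input_2 a (4, 4) residual map; with
# upsample=True the fused bilinear path doubles the 4x4 maps to (4, 3, 8, 8).
def _demo_rgb_block():
    block = RGBBlockNew(latent_dim=4, input_channel=4, upsample=True).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    style = torch.rand(4, 4, device='cuda')
    residual = torch.rand(4, 4, device='cuda')
    out = block(x, style, residual)
    assert out.shape == (4, 3, 8, 8)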
| mahmoudnafifi/HistoGAN | RGBBlock | false | 15,997 | ["MIT"] | 169 | 50be1482638ace3ec85d733e849dec494ede155b | https://github.com/mahmoudnafifi/HistoGAN/tree/50be1482638ace3ec85d733e849dec494ede155b |
_ChannelAttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3m/c3mxgkf4weymbmbgydi4j4i6eycdz2flzbf3jce3eapte2aqyfta.py
# Topologically Sorted Source Nodes: [attention_new], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# attention_new => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %bmm), kwargs = {})
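# Editorial note: because the rowwise max is constant along the softmax
# dimension, softmax(max(a) - a) == softmax(-a); the subtraction flips the
# similarity scores so that the most correlated channel pairs receive the
# smallest attention weights.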
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp6 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
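# Editorial note: kernels 1 and 2 together are the standard exp-normalize
# trick, softmax(x)_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j),
# split into two pointwise passes that each re-read the 4-element row instead
# of launching a separate reduction kernel.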
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j4/cj4f6qdb45emg4zrdv5vzxtw2vswpyt2rqyalr6mxgomzeyk55j5.py
# Topologically Sorted Source Nodes: [mul, out], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# out => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_new], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_1, bmm_1], Original ATen: [aten._softmax, aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, out], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_3.run(primals_2, buf4, primals_1, buf5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from itertools import product as product
class _ChannelAttentionModule(nn.Module):
"""Channel attention module"""
def __init__(self, **kwargs):
super(_ChannelAttentionModule, self).__init__()
self.beta = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
batch_size, _, height, width = x.size()
feat_a = x.view(batch_size, -1, height * width)
feat_a_transpose = x.view(batch_size, -1, height * width).permute(0,
2, 1)
attention = torch.bmm(feat_a, feat_a_transpose)
attention_new = torch.max(attention, dim=-1, keepdim=True)[0
].expand_as(attention) - attention
attention = self.softmax(attention_new)
feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height,
width)
out = self.beta * feat_e + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
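# Illustrative usage sketch (an editorial addition, not part of the original
# module): channel attention preserves the input shape, and because beta is
# initialized to zero the module starts out as an exact identity.
def _demo_channel_attention():
    cam = _ChannelAttentionModule()
    x = torch.rand(4, 4, 4, 4)
    y = cam(x)
    assert y.shape == x.shape
    assert torch.equal(y, x)  # beta == 0 at init, so out = 0 * feat_e + x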
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + x2, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp6 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64,
16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1,
16), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16),
(64, 16, 1), 0), out=buf4)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(256)](primals_2, buf4, primals_1,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf5, buf4
class _ChannelAttentionModuleNew(nn.Module):
"""Channel attention module"""
def __init__(self, **kwargs):
super(_ChannelAttentionModuleNew, self).__init__()
self.beta = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_2 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
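# Hedged parity sketch (an editorial addition; assumes a CUDA device, since
# the fused kernels are compiled for 'cuda'): the rewritten module should
# match the eager attention math up to float32 accumulation error. The
# reference is recomputed inline here so the check is self-contained.
def _check_channel_attention_parity():
    torch.manual_seed(0)
    m = _ChannelAttentionModuleNew().cuda()
    m.beta.data.fill_(0.5)  # non-zero so the attention branch is exercised
    x = torch.rand(4, 4, 4, 4, device='cuda')
    feat = x.view(4, -1, 16)
    energy = torch.bmm(feat, feat.transpose(1, 2))
    attn = torch.softmax(energy.max(-1, keepdim=True)[0].expand_as(energy) -
        energy, dim=-1)
    ref = m.beta * torch.bmm(attn, feat).view_as(x) + x
    assert torch.allclose(m(x), ref, atol=1e-05)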
| maoweinuaa/FaceParsing | _ChannelAttentionModule | false | 15,998 | ["MIT"] | 138 | 5e153b636e7e57b20d3079b2e0f15aa02dc4046d | https://github.com/maoweinuaa/FaceParsing/tree/5e153b636e7e57b20d3079b2e0f15aa02dc4046d |
pdice_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cw/ccwlmtupqpzcdcnemyzb2psuayzk57eso73jspos7dvgalg7zzhe.py
# Topologically Sorted Source Nodes: [setitem, setitem_1, y_true_th, y_pred_th, mul_2, intersection, mul_3, add, i, j, add_1, add_2, score, mean, loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.mul, aten.sum, aten.add, aten.div, aten.mean, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# i => sum_1
# intersection => sum_3
# j => sum_2
# loss => sub
# mean => mean
# mul_2 => mul_2
# mul_3 => mul_3
# score => div
# setitem => full_default, index_put
# setitem_1 => full_default_1, index_put_1
# y_pred_th => mul_1
# y_true_th => mul
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%arg0_1, [%ge], %full_default), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%lt], %full_default_1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %index_put_1), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %index_put_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 0.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {})
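# Editorial note: in scalar form the fused reduction below computes, with
# smooth = 0 and pmap = 1[p >= 0.8],
#     score = 2 * sum(y_true*pmap * y_pred*pmap)
#             / (sum(y_true*pmap) + sum(y_pred*pmap))
#     loss  = 1 - score
# (the trailing mean over a 0-d tensor and the division by 1.0 are no-ops).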
triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp1 = 0.8
tmp2 = tmp0 >= tmp1
tmp3 = 1.0
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp5 = tmp4 < tmp1
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp4)
tmp9 = tmp8 * tmp7
tmp11 = tmp10 * tmp7
tmp12 = tmp9 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tl.broadcast_to(tmp9, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = tl.broadcast_to(tmp11, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 2.0
tmp23 = tmp15 * tmp22
tmp24 = tmp23 + tmp6
tmp25 = tmp18 + tmp21
tmp26 = tmp25 + tmp6
tmp27 = tmp24 / tmp26
tmp28 = tmp27 / tmp3
tmp29 = tmp3 - tmp28
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [setitem, setitem_1, y_true_th, y_pred_th, mul_2, intersection, mul_3, add, i, j, add_1, add_2, score, mean, loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.mul, aten.sum, aten.add, aten.div, aten.mean, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0.run(buf5, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class pdice_loss(nn.Module):
def __init__(self, batch=True):
super(pdice_loss, self).__init__()
self.batch = batch
def soft_dice_coeff(self, y_true, y_pred, p):
smooth = 0.0
if self.batch:
pmap = p.clone()
pmap[pmap >= 0.8] = 1
pmap[pmap < 0.8] = 0
y_true_th = y_true * pmap
y_pred_th = y_pred * pmap
i = torch.sum(y_true_th)
j = torch.sum(y_pred_th)
intersection = torch.sum(y_true_th * y_pred_th)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred, pmap):
loss = 1 - self.soft_dice_coeff(y_true, y_pred, pmap)
return loss
def forward(self, y_pred, y_true, pmap):
b = self.soft_dice_loss(y_true, y_pred, pmap)
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
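# Deterministic sanity sketch (an editorial addition, not part of the
# original file): with all-ones masks and an all-ones confidence map the
# thresholded region covers every element, the soft dice score is exactly 1,
# and the loss is 0.
def _demo_pdice():
    crit = pdice_loss()
    ones = torch.ones(4, 4, 4, 4)
    loss = crit(ones, ones, ones)  # forward(y_pred, y_true, pmap)
    assert torch.allclose(loss, torch.tensor(0.0))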
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp1 = 0.8
tmp2 = tmp0 >= tmp1
tmp3 = 1.0
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp5 = tmp4 < tmp1
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp4)
tmp9 = tmp8 * tmp7
tmp11 = tmp10 * tmp7
tmp12 = tmp9 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tl.broadcast_to(tmp9, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = tl.broadcast_to(tmp11, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 2.0
tmp23 = tmp15 * tmp22
tmp24 = tmp23 + tmp6
tmp25 = tmp18 + tmp21
tmp26 = tmp25 + tmp6
tmp27 = tmp24 / tmp26
tmp28 = tmp27 / tmp3
tmp29 = tmp3 - tmp28
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0[grid
(1)](buf5, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf5,
class pdice_lossNew(nn.Module):
def __init__(self, batch=True):
super(pdice_lossNew, self).__init__()
self.batch = batch
def soft_dice_coeff(self, y_true, y_pred, p):
smooth = 0.0
if self.batch:
pmap = p.clone()
pmap[pmap >= 0.8] = 1
pmap[pmap < 0.8] = 0
y_true_th = y_true * pmap
y_pred_th = y_pred * pmap
i = torch.sum(y_true_th)
j = torch.sum(y_pred_th)
intersection = torch.sum(y_true_th * y_pred_th)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred, pmap):
loss = 1 - self.soft_dice_coeff(y_true, y_pred, pmap)
return loss
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
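# Editorial note: the fused kernel above hard-codes the 0.8 threshold (tmp1),
# smooth = 0.0 (tmp6), and a flat 256-element reduction (RBLOCK = 256), so
# this compiled variant is specialized to contiguous 4x4x4x4 float32 inputs
# rather than shape-generic like the eager pdice_loss class.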
| manuel-rdz/SGL-Retinal-Vessel-Segmentation | pdice_loss | false | 15,999 | ["MIT"] | 45 | 7897d977e77aa0b5d3acb86e0aa74c6829d67415 | https://github.com/manuel-rdz/SGL-Retinal-Vessel-Segmentation/tree/7897d977e77aa0b5d3acb86e0aa74c6829d67415 |
PatchEmbed | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jg/cjgafsignr6eltwpgfdtyyamm7z2oofx6jlesakdp45oail3wyp7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 4, 4], [1, 7, 7], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[67108864],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 51904512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16896) % 768
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1))
assert_size_stride(primals_2, (768, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896, 256, 16, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 51904512, grid=grid(51904512), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0), primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((768, 3, 1, 16, 16), (768, 256, 256, 16, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, x):
x = self.proj(x)
return x.flatten(2).transpose(1, 2)
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
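# Illustrative shape sketch (an editorial addition): with kernel (1, 16, 16),
# stride (1, 4, 4) and padding (1, 7, 7), a (4, 3, 64, 64, 64) clip yields
# 66 * 16 * 16 = 16896 tokens of width 768, matching the (4, 16896, 768)
# buffer reinterpretation in the compiled call() above.
def _demo_patch_embed():
    tokens = PatchEmbed()(torch.rand(4, 3, 64, 64, 64))
    assert tokens.shape == (4, 16896, 768)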
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16896 % 768
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896,
256, 16, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(51904512)](buf1, primals_2,
51904512, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0
), primals_1, primals_3
class PatchEmbedNew(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
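# Editorial note: instead of materializing x.flatten(2).transpose(1, 2), the
# compiled path reinterprets buf1 with strides (12976128, 1, 16896), so the
# returned (4, 16896, 768) tensor is a zero-copy view of the convolution
# output.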
| makarandtapaswi/SlowFast | PatchEmbed | false | 16,000 | ["Apache-2.0"] | 4,914 | 39ef35c9a086443209b458cceaec86a02e27b369 | https://github.com/makarandtapaswi/SlowFast/tree/39ef35c9a086443209b458cceaec86a02e27b369 |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/di/cdijcpkyx56houfal4ki6il7jkaksycupkszs32h2msepvzzrlog.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 86528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 676) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pl/cpllpfgu6yj3hoeiy2fgu22gwwiekps4ak5ojo4ju7e2tpij6eek.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tr/ctrkzmyh4pj3ravgoqrmvb4w47tnmqrj33664h4xyyqctdwzby5v.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
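# Editorial note: the kernel below walks each 2x2 window in row-major order,
# tracking both the running max (out_ptr1) and the winning offset 0..3 as an
# int8 map (out_ptr0), which is the index layout max_pool2d_with_indices
# needs for the backward pass.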
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2q/c2qz2ws53yjsy6jju72g7tnxehfjxae2rfg45uiafec7haukf7jm.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_9 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kt/cktoaw2tj346cwycsyhchtdmh6r7rvxeyqrjqite25fkxbpqqwh7.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_4 = async_compile.triton('triton_per_fused__log_softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 26, 26), (21632, 676, 26, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 86528, grid=grid(86528), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 24, 24), (36864, 576, 24, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 147456, grid=grid(147456), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.int8)
buf5 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 36864, grid=grid(36864), stream=stream0)
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf7, primals_7, 512, grid=grid(512), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf11 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_4.run(buf8, buf11, 4, 10, grid=grid(4), stream=stream0)
del buf8
return (buf11, primals_2, primals_4, primals_1, buf1, buf3, buf4, reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), buf7, buf11, primals_8, primals_6, )
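# Note (added for clarity): call() returns the log-softmax output (buf11)
# first, followed by every tensor Inductor saves for the backward pass:
# the conv weights, the input, the intermediate activations, and the int8
# max-pool indices in buf4.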
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view((-1, 1, 28, 28))
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
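# Hedged usage sketch (an editorial addition, not part of the original
# source): a minimal eager-mode smoke test wiring the dataset helpers
# get_init_inputs()/get_inputs() into the model.
def _demo_net():
    init_args, init_kwargs = get_init_inputs()
    model = Net(*init_args, **init_kwargs)
    out = model(*get_inputs())
    # log_softmax output: shape (batch, classes); exp(out) rows sum to 1
    assert out.shape == (4, 10)
    return out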
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 86528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
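# Note (added for clarity): out_ptr0 receives an int8 code for which tap of
# the 2x2 window won the max (0=top-left, 1=top-right, 2=bottom-left,
# 3=bottom-right) so autograd can route gradients; out_ptr1 receives the
# max value itself.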
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
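# Reference sketch (editorial addition, not Inductor output): the kernel
# above is the standard numerically stable log-softmax over each row of the
# (4, 10) logits, i.e. x - max(x) - log(sum(exp(x - max(x)))). In eager
# PyTorch the same reduction is simply torch.log_softmax(buf8, dim=1).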
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 26, 26), (21632, 676, 26, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86528)](buf1, primals_3,
86528, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 24, 24), (36864, 576, 24, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(147456)](buf3, primals_5,
147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(36864)](buf3, buf4,
buf5, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0),
reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_3[grid(512)](buf7, primals_7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf11 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_4[grid(4)](buf8, buf11, 4, 10, XBLOCK
=1, num_warps=2, num_stages=1)
del buf8
return (buf11, primals_2, primals_4, primals_1, buf1, buf3, buf4,
reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), buf7, buf11,
primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| manjuransari/petastorm | Net | false | 16,001 | [
"Apache-2.0"
] | 1,393 | 1af7212a1293b1edb78767a359aa2b60db24b71b | https://github.com/manjuransari/petastorm/tree/1af7212a1293b1edb78767a359aa2b60db24b71b |
DoubleConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ry/cryojrqkmpscezq3wxzo5tzxaee6urxcijqbctbqwfktcvw3w7dy.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max]), kwargs = {})
triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0))))) + (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) * ((((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ln/clnaais2dzezy7i5ppzib3cjpd5uoc5pomnd2er4oxs7ycc37w42.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr1 + (x0), tmp8, xmask)
''', device_str='cuda')
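# Note (added for clarity): this fused kernel writes two outputs per element.
# out_ptr0 gets the boolean (conv + bias) > 0 mask that LeakyReLU's backward
# needs; out_ptr1 gets the activation value
# where(mask, conv + bias, 0.01 * (conv + bias)).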
# kernel path: runs/run_shard_0/inductor_cache/7f/c7fdaedtho322pswnb3fofhxvbs75ppkgneakjxevb5g5xavh4jt.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# y => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_replication_pad2d_0.run(primals_3, buf0, 144, grid=grid(144), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_2, buf2, buf3, 64, grid=grid(64), stream=stream0)
del primals_2
buf4 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad2d]
triton_poi_fused_replication_pad2d_0.run(buf3, buf4, 144, grid=grid(144), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf5, primals_5, buf6, buf7, 64, grid=grid(64), stream=stream0)
del buf5
del primals_5
buf8 = empty_strided_cuda((4, 1, 2, 2), (4, 4, 2, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 2, 2), (4, 4, 2, 1), torch.int8)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf7, buf8, buf9, 16, grid=grid(16), stream=stream0)
return (buf8, primals_1, primals_4, buf0, buf2, buf3, buf4, buf6, buf7, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvBlock(nn.Module):
""" Conv layer block """
def __init__(self, kernel, in_depth, conv_depth, stride=1, padding=1,
normalization=False, norm_type='BN', pooling=False,
bias_initialization='zeros', activation=True, dilation=1,
return_before_pooling=False):
""" ConvBlock constructor
Args:
kernel: kernel size (int)
in_depth: depth of input tensor
conv_depth: number of out channels produced by the convolution
        stride: stride of the convolution; default is 1.
        padding: replication padding added to both sides before the convolution
          operation; default is 1.
normalization: boolean flag to apply normalization after the conv;
default is false.
norm_type: normalization operation: 'BN' for batch-norm (default),
'IN' for instance normalization.
pooling: boolean flag to apply a 2 x 2 max-pooling with stride of 2
before returning the final result; default is false.
bias_initialization: bias initialization: 'zeros' (default) or 'ones'.
activation: boolean flag to apply a leaky ReLU activation; default is
true.
dilation: spacing between conv kernel elements; default is 1.
return_before_pooling: boolean flag to return the tensor before
applying max-pooling (if 'pooling' is true); default is false.
Returns:
ConvBlock object with the selected settings.
"""
super(ConvBlock, self).__init__()
conv = torch.nn.Conv2d(in_depth, conv_depth, kernel, stride=stride,
dilation=dilation, padding=padding, padding_mode='replicate')
torch.nn.init.kaiming_normal_(conv.weight)
if bias_initialization == 'ones':
torch.nn.init.ones_(conv.bias)
elif bias_initialization == 'zeros':
torch.nn.init.zeros_(conv.bias)
else:
raise NotImplementedError
if activation:
self.activation = torch.nn.LeakyReLU(inplace=False)
else:
self.activation = None
if normalization:
if norm_type == 'BN':
self.normalization = torch.nn.BatchNorm2d(conv_depth,
affine=True)
elif norm_type == 'IN':
self.normalization = torch.nn.InstanceNorm2d(conv_depth,
affine=False)
else:
raise NotImplementedError
else:
self.normalization = None
self.conv = conv
if pooling:
self.pooling = torch.nn.MaxPool2d(2, stride=2)
else:
self.pooling = None
self.return_before_pooling = return_before_pooling
def forward(self, x):
""" Forward function of ConvBlock module
Args:
x: input tensor.
Returns:
y: processed tensor.
"""
x = self.conv(x)
if self.normalization is not None:
x = self.normalization(x)
if self.activation is not None:
x = self.activation(x)
if self.pooling is not None:
y = self.pooling(x)
else:
y = x
if self.return_before_pooling:
return y, x
else:
return y
class DoubleConvBlock(nn.Module):
""" Double conv layers block """
def __init__(self, in_depth, out_depth, mid_depth=None, kernel=3,
stride=1, padding=None, dilation=None, normalization=False,
norm_type='BN', pooling=True, return_before_pooling=False,
normalization_block='Both'):
""" DoubleConvBlock constructor
Args:
in_depth: depth of input tensor
out_depth: number of out channels produced by the second convolution
mid_depth: number of out channels produced by the first convolution;
default is mid_depth = out_depth.
kernel: kernel size (int); default is 3.
        stride: stride of the convolution; default is 1.
        padding: replication padding added to both sides before the convolution
          operations; default is [1, 1].
dilation: spacing between elements of each conv kernel; default is [1, 1].
normalization: boolean flag to apply normalization after the conv;
default is false.
norm_type: normalization operation: 'BN' for batch-norm (default),
'IN' for instance normalization.
        pooling: boolean flag to apply a 2 x 2 max-pooling with stride of 2
          before returning the final result; default is true.
return_before_pooling: boolean flag to return the tensor before
applying max-pooling (if 'pooling' is true); default is false.
        normalization_block: if the normalization flag is set to true, this
variable controls when to apply the normalization process. It can be:
'Both' (apply normalization after both conv layers), 'First', or
'Second'.
Returns:
DoubleConvBlock object with the selected settings.
"""
super().__init__()
if padding is None:
padding = [1, 1]
if dilation is None:
dilation = [1, 1]
if mid_depth is None:
mid_depth = out_depth
if normalization:
if normalization_block == 'First':
norm = [True, False]
elif normalization_block == 'Second':
norm = [False, True]
elif normalization_block == 'Both':
norm = [True, True]
else:
raise NotImplementedError
else:
norm = [False, False]
self.double_conv_1 = ConvBlock(kernel=kernel, in_depth=in_depth,
conv_depth=mid_depth, stride=stride, padding=padding[0],
pooling=False, dilation=dilation[0], norm_type=norm_type,
normalization=norm[0])
self.double_conv_2 = ConvBlock(kernel=kernel, in_depth=mid_depth,
conv_depth=out_depth, stride=stride, padding=padding[1],
pooling=pooling, dilation=dilation[1], norm_type=norm_type,
normalization=norm[1], return_before_pooling=return_before_pooling)
def forward(self, x):
""" Forward function of DoubleConvBlock module
Args:
x: input tensor
Returns:
y: processed tensor
"""
x = self.double_conv_1(x)
return self.double_conv_2(x)
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {'in_depth': 1, 'out_depth': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) *
(-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) *
(0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + (
3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >=
-1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 +
x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
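# Note (added for clarity): the index arithmetic above is an inlined clamp.
# For a pad of 1 around a 4x4 map it reads in[clamp(y - 1, 0, 3),
# clamp(x - 1, 0, 3)], which matches
# torch.nn.functional.pad(x, (1, 1, 1, 1), mode='replicate').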
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad2d_0[grid(144)](primals_3, buf0,
144, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(64)](buf1, primals_2,
buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
triton_poi_fused_replication_pad2d_0[grid(144)](buf3, buf4, 144,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf7 = buf1
del buf1
triton_poi_fused_convolution_leaky_relu_1[grid(64)](buf5, primals_5,
buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del primals_5
buf8 = empty_strided_cuda((4, 1, 2, 2), (4, 4, 2, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 2, 2), (4, 4, 2, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(16)](buf7, buf8,
buf9, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf8, primals_1, primals_4, buf0, buf2, buf3, buf4, buf6, buf7, buf9
class ConvBlock(nn.Module):
""" Conv layer block """
def __init__(self, kernel, in_depth, conv_depth, stride=1, padding=1,
normalization=False, norm_type='BN', pooling=False,
bias_initialization='zeros', activation=True, dilation=1,
return_before_pooling=False):
""" ConvBlock constructor
Args:
kernel: kernel size (int)
in_depth: depth of input tensor
conv_depth: number of out channels produced by the convolution
        stride: stride of the convolution; default is 1.
        padding: replication padding added to both sides before the convolution
          operation; default is 1.
normalization: boolean flag to apply normalization after the conv;
default is false.
norm_type: normalization operation: 'BN' for batch-norm (default),
'IN' for instance normalization.
pooling: boolean flag to apply a 2 x 2 max-pooling with stride of 2
before returning the final result; default is false.
bias_initialization: bias initialization: 'zeros' (default) or 'ones'.
activation: boolean flag to apply a leaky ReLU activation; default is
true.
dilation: spacing between conv kernel elements; default is 1.
return_before_pooling: boolean flag to return the tensor before
applying max-pooling (if 'pooling' is true); default is false.
Returns:
ConvBlock object with the selected settings.
"""
super(ConvBlock, self).__init__()
conv = torch.nn.Conv2d(in_depth, conv_depth, kernel, stride=stride,
dilation=dilation, padding=padding, padding_mode='replicate')
torch.nn.init.kaiming_normal_(conv.weight)
if bias_initialization == 'ones':
torch.nn.init.ones_(conv.bias)
elif bias_initialization == 'zeros':
torch.nn.init.zeros_(conv.bias)
else:
raise NotImplementedError
if activation:
self.activation = torch.nn.LeakyReLU(inplace=False)
else:
self.activation = None
if normalization:
if norm_type == 'BN':
self.normalization = torch.nn.BatchNorm2d(conv_depth,
affine=True)
elif norm_type == 'IN':
self.normalization = torch.nn.InstanceNorm2d(conv_depth,
affine=False)
else:
raise NotImplementedError
else:
self.normalization = None
self.conv = conv
if pooling:
self.pooling = torch.nn.MaxPool2d(2, stride=2)
else:
self.pooling = None
self.return_before_pooling = return_before_pooling
def forward(self, x):
""" Forward function of ConvBlock module
Args:
x: input tensor.
Returns:
y: processed tensor.
"""
x = self.conv(x)
if self.normalization is not None:
x = self.normalization(x)
if self.activation is not None:
x = self.activation(x)
if self.pooling is not None:
y = self.pooling(x)
else:
y = x
if self.return_before_pooling:
return y, x
else:
return y
class DoubleConvBlockNew(nn.Module):
""" Double conv layers block """
def __init__(self, in_depth, out_depth, mid_depth=None, kernel=3,
stride=1, padding=None, dilation=None, normalization=False,
norm_type='BN', pooling=True, return_before_pooling=False,
normalization_block='Both'):
""" DoubleConvBlock constructor
Args:
in_depth: depth of input tensor
out_depth: number of out channels produced by the second convolution
mid_depth: number of out channels produced by the first convolution;
default is mid_depth = out_depth.
kernel: kernel size (int); default is 3.
        stride: stride of the convolution; default is 1.
        padding: replication padding added to both sides before the convolution
          operations; default is [1, 1].
dilation: spacing between elements of each conv kernel; default is [1, 1].
normalization: boolean flag to apply normalization after the conv;
default is false.
norm_type: normalization operation: 'BN' for batch-norm (default),
'IN' for instance normalization.
        pooling: boolean flag to apply a 2 x 2 max-pooling with stride of 2
          before returning the final result; default is true.
return_before_pooling: boolean flag to return the tensor before
applying max-pooling (if 'pooling' is true); default is false.
        normalization_block: if the normalization flag is set to true, this
variable controls when to apply the normalization process. It can be:
'Both' (apply normalization after both conv layers), 'First', or
'Second'.
Returns:
DoubleConvBlock object with the selected settings.
"""
super().__init__()
if padding is None:
padding = [1, 1]
if dilation is None:
dilation = [1, 1]
if mid_depth is None:
mid_depth = out_depth
if normalization:
if normalization_block == 'First':
norm = [True, False]
elif normalization_block == 'Second':
norm = [False, True]
elif normalization_block == 'Both':
norm = [True, True]
else:
raise NotImplementedError
else:
norm = [False, False]
self.double_conv_1 = ConvBlock(kernel=kernel, in_depth=in_depth,
conv_depth=mid_depth, stride=stride, padding=padding[0],
pooling=False, dilation=dilation[0], norm_type=norm_type,
normalization=norm[0])
self.double_conv_2 = ConvBlock(kernel=kernel, in_depth=mid_depth,
conv_depth=out_depth, stride=stride, padding=padding[1],
pooling=pooling, dilation=dilation[1], norm_type=norm_type,
normalization=norm[1], return_before_pooling=return_before_pooling)
def forward(self, input_0):
primals_1 = self.double_conv_1.conv.weight
primals_2 = self.double_conv_1.conv.bias
primals_4 = self.double_conv_2.conv.weight
primals_5 = self.double_conv_2.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
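# Note (added for clarity): the compiled wrapper keeps the original
# constructor (so the state_dict layout is unchanged) and replaces forward()
# with the Inductor call() above, passing the conv weights/biases and the
# input tensor as primals.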
| manipopopo/C5 | DoubleConvBlock | false | 16,002 | [
"Apache-2.0"
] | 51 | 154eb38c330e65476ddb77836948a28237f23c88 | https://github.com/manipopopo/C5/tree/154eb38c330e65476ddb77836948a28237f23c88 |
iCaRL_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dq/cdq6dx577blbfbzzswpt7egiozmhbygzfxmihglfhdljclos4vjq.py
# Topologically Sorted Source Nodes: [target, logist, add, log, p0, sub, sub_1, add_1, log_1, p1, add_2, loss, loss_1], Original ATen: [aten._to_copy, aten.add, aten.log, aten.mul, aten.rsub, aten.neg, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# log => log
# log_1 => log_1
# logist => convert_element_type
# loss => neg
# loss_1 => sum_1
# p0 => mul
# p1 => mul_1
# sub => sub
# sub_1 => sub_1
# target => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg1_1, torch.float64), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float64), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 1e-06), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %log), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-06), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg,), kwargs = {})
triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tmp0.to(tl.float64)
tmp3 = tmp2.to(tl.float64)
tmp4 = tl.full([1], 1e-06, tl.float64)
tmp5 = tmp3 + tmp4
tmp6 = libdevice.log(tmp5)
tmp7 = tmp1 * tmp6
tmp8 = tl.full([1], 1.0, tl.float64)
tmp9 = tmp8 - tmp1
tmp10 = tmp8 - tmp3
tmp11 = tmp10 + tmp4
tmp12 = libdevice.log(tmp11)
tmp13 = tmp9 * tmp12
tmp14 = tmp7 + tmp13
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float64)
# Topologically Sorted Source Nodes: [target, logist, add, log, p0, sub, sub_1, add_1, log_1, p1, add_2, loss, loss_1], Original ATen: [aten._to_copy, aten.add, aten.log, aten.mul, aten.rsub, aten.neg, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0.run(arg1_1, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class iCaRL_loss(nn.Module):
def __init__(self):
super(iCaRL_loss, self).__init__()
def forward(self, logist, target):
eps = 1e-06
logist = logist.double()
target = target.double()
p0 = torch.mul(target, torch.log(logist + eps))
p1 = torch.mul(1 - target, torch.log(1 - logist + eps))
loss = -torch.add(p0, p1)
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
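# Hedged equivalence sketch (an editorial addition, not part of the original
# source): up to the 1e-06 stabiliser, this loss is element-wise binary
# cross-entropy with sum reduction.
def _demo_icarl_loss():
    import torch.nn.functional as F
    logist, target = get_inputs()
    ours = iCaRL_loss()(logist, target)
    ref = F.binary_cross_entropy(logist.double(), target.double(),
                                 reduction='sum')
    # the eps term keeps the two within a small, eps-driven tolerance
    assert torch.allclose(ours, ref, rtol=1e-3)
    return ours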
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tmp0.to(tl.float64)
tmp3 = tmp2.to(tl.float64)
tmp4 = tl.full([1], 1e-06, tl.float64)
tmp5 = tmp3 + tmp4
tmp6 = libdevice.log(tmp5)
tmp7 = tmp1 * tmp6
tmp8 = tl.full([1], 1.0, tl.float64)
tmp9 = tmp8 - tmp1
tmp10 = tmp8 - tmp3
tmp11 = tmp10 + tmp4
tmp12 = libdevice.log(tmp11)
tmp13 = tmp9 * tmp12
tmp14 = tmp7 + tmp13
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
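# Note (added for clarity): this single persistent reduction fuses the whole
# loss (the float64 upcast, both log terms, the negation, and the global sum
# over all 256 elements) into one launch that writes a scalar to out_ptr0.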
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float64)
get_raw_stream(0)
triton_per_fused__to_copy_add_log_mul_neg_rsub_sum_0[grid(1)](arg1_1,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class iCaRL_lossNew(nn.Module):
def __init__(self):
super(iCaRL_lossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mao-example/End-to-End-Incremental-Learning | iCaRL_loss | false | 16,003 | [
"MIT"
] | 53 | 39d6f4e594e805a713aa7a1deedbcb03d1f2c9cc | https://github.com/mao-example/End-to-End-Incremental-Learning/tree/39d6f4e594e805a713aa7a1deedbcb03d1f2c9cc |
LeNetPP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pv/cpv7qykvsb2x3mhhybt3e54zyj7yf52qrhpen6orewcwoez2g3mx.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
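# Note: this kernel and triton_poi_fused_1 through triton_poi_fused_4 below
# all follow one pattern: they repack a (out_ch, in_ch, 5, 5) convolution
# weight from PyTorch's contiguous layout into a channels-last-style layout
# (input channel innermost) that the external convolution kernels consume.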
# kernel path: runs/run_shard_0/inductor_cache/af/cafxypwziobqgsujacsjlhl4vifehmfd4kjv6mmsqwsmu52bn4ve.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ts/ctsxf36l57u3mq2ugcgebaybh3dyc2ufbhccjblzb2rb7pjushfr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qw/cqwg3xdwx3eugpxibvknyiosqdjdt7h7peu75twynersbsv447ra.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/23/c232cjjrpfm7piga4i2u2f6iwdslqscw63lwt2xylgovf64odkma.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4h/c4hcobkjh5ndccnwn27fc3y3a45kthbpdekx4r7i2rmnirh3widu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 1024], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 576
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (576*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + (32*x2) + (18432*y1)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lp/clpvz4wkxjigctsxh7jhtdveyv4cac4rp62cxpmaryo57tkuby3z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_6 = async_compile.triton('triton_poi_fused__prelu_kernel_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/h7/ch7qxrxvtfhpc4flqbdhjn6lyu25duxy6rqrd4ufpbdezerdqqa3.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_5, %primals_6, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %convolution_1), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_7 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
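# Note: fused conv+PReLU kernels of this shape add the convolution bias in
# place (in_out_ptr0) and then apply PReLU, where(x > 0, x, a * x), with the
# single learned slope `a` broadcast from in_ptr1; both the biased
# pre-activation and the activated output are stored, since the
# pre-activation is kept for the backward pass.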
# kernel path: runs/run_shard_0/inductor_cache/cx/ccx74wlwwf7cb5yjcdq3ooqngkcyb27ev663ivoe6bet5sddq3c4.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 12
x2 = (xindex // 384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (1536*x2)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (1536*x2)), None)
tmp3 = tl.load(in_ptr0 + (768 + x0 + (64*x1) + (1536*x2)), None)
tmp5 = tl.load(in_ptr0 + (800 + x0 + (64*x1) + (1536*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
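# Note: this 2x2/stride-2 max-pooling kernel emits two tensors: the pooled
# maxima and, for the backward pass, an int8 code in {0, 1, 2, 3} recording
# which of the four window positions supplied the maximum.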
# kernel path: runs/run_shard_0/inductor_cache/lb/clbs5plq3l54kagj7xyoz4bjep23pwvvxay3m3nhjvzfteysjtlk.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_8, %primals_9, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %convolution_2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_9 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jb/cjb5dhwcpestwfavlatzw7rhg5pwgtoxv27zgkjahh7xtb2kiuoj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 6
x2 = (xindex // 384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (1536*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (1536*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (768 + x0 + (128*x1) + (1536*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (832 + x0 + (128*x1) + (1536*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d3/cd3jr7trplj436qr2ltgnqe53j66334viwky4sltzir4rahlmv53.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_14, %primals_15, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %convolution_4), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_11 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fc/cfc5r5uj2epaj3kwiyokvsydl4fpwcq6tzrvfzlhl3nmdpzkmuns.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_8 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_5, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y5 = yindex
y4 = (yindex // 9)
y6 = yindex % 9
tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (768 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (896 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (9*x2) + (1152*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
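# Note: besides pooling, this final max-pool kernel also transposes the
# result from the channels-last layout used by the conv stack back to
# contiguous NCHW (strides (1152, 9, 3, 1)), so the pooled tensor can later
# be reinterpreted as a (4, 1152) matrix for the first linear layer without
# a copy.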
# kernel path: runs/run_shard_0/inductor_cache/xr/cxrbamhin4b5pl7ehs6gvheuobdvsywbxex23ck5t2wetzlfapk5.py
# Topologically Sorted Source Nodes: [ip1], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# ip1 => gt_6, mul_6, where_6
# Graph fragment:
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %addmm), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %addmm, %mul_6), kwargs = {})
triton_poi_fused__prelu_kernel_13 = async_compile.triton('triton_poi_fused__prelu_kernel_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/73/c73lsuio34njq4fgaqysgeklkqhrwubqvg3ujiqxlhe6sgg544m5.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_14 = async_compile.triton('triton_per_fused__log_softmax_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_14(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
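# Note: this is the numerically stable log-softmax: with m = max_j x_j, it
# computes x - m - log(sum_j exp(x_j - m)), avoiding overflow in exp() for
# large logits; rnumel=10 is padded to RBLOCK=16 and masked with -inf (for
# the max) and 0 (for the sum).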
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_6, (32, ), (1, ))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (1, ), (1, ))
assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_12, (64, ), (1, ))
assert_size_stride(primals_13, (1, ), (1, ))
assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (1, ), (1, ))
assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_18, (128, ), (1, ))
assert_size_stride(primals_19, (1, ), (1, ))
assert_size_stride(primals_20, (2, 1152), (1152, 1))
assert_size_stride(primals_21, (2, ), (1, ))
assert_size_stride(primals_22, (1, ), (1, ))
assert_size_stride(primals_23, (10, 2), (2, 1))
assert_size_stride(primals_24, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_5, buf0, 1024, 25, grid=grid(1024, 25), stream=stream0)
del primals_5
buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_8, buf1, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_8
buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_11, buf2, 4096, 25, grid=grid(4096, 25), stream=stream0)
del primals_11
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_14, buf3, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_14
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_17, buf4, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1))
buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf5, primals_2, buf6, 128, 576, grid=grid(128, 576), stream=stream0)
del primals_2
buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_6.run(buf6, primals_4, buf7, 73728, grid=grid(73728), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_7.run(buf9, primals_6, primals_7, buf10, 73728, grid=grid(73728), stream=stream0)
del primals_6
buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.float32)
buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_9.run(buf14, primals_9, primals_10, buf15, 36864, grid=grid(36864), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64))
buf17 = buf16; del buf16 # reuse
buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_9.run(buf17, primals_12, primals_13, buf18, 36864, grid=grid(36864), stream=stream0)
del primals_12
buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch.float32)
buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf18, buf19, buf20, 9216, grid=grid(9216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128))
buf22 = buf21; del buf21 # reuse
buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_11.run(buf22, primals_15, primals_16, buf23, 18432, grid=grid(18432), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128))
buf25 = buf24; del buf24 # reuse
buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_11.run(buf25, primals_18, primals_19, buf26, 18432, grid=grid(18432), stream=stream0)
del primals_18
buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128), torch.int8)
buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf26, buf27, buf28, 36, 128, grid=grid(36, 128), stream=stream0)
buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1, 1152), 0), alpha=1, beta=1, out=buf29)
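        # The addmm above computes out = bias + x @ W^T; reinterpret_tensor
        # flattens buf28 to (4, 1152) without a copy, which is valid because
        # buf28 was allocated with contiguous NCHW strides above.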
del primals_21
buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [ip1], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_13.run(buf29, primals_22, buf30, 8, grid=grid(8), stream=stream0)
buf31 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [ip2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_24, buf30, reinterpret_tensor(primals_23, (2, 10), (1, 2), 0), alpha=1, beta=1, out=buf31)
del primals_24
buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_14.run(buf31, buf34, 4, 10, grid=grid(4), stream=stream0)
del buf31
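        # The tuple below returns the two user-visible outputs (buf30, buf34)
        # followed by the weights and activations inductor saved for backward.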
return (buf30, buf34, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12, buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), buf29, buf30, buf34, primals_23, primals_20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 24, 24), (576, 576, 24, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((2, 1152), (1152, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((10, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LeNetPP(nn.Module):
def __init__(self, dim_hidden=2, num_classes=10):
super(LeNetPP, self).__init__()
self.num_classes = num_classes
self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
self.prelu1_1 = nn.PReLU()
self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
self.prelu1_2 = nn.PReLU()
self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.prelu2_1 = nn.PReLU()
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
self.prelu2_2 = nn.PReLU()
self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
self.prelu3_1 = nn.PReLU()
self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
self.prelu3_2 = nn.PReLU()
self.prelu_ip1 = nn.PReLU()
self.ip1 = nn.Linear(128 * 3 * 3, dim_hidden)
self.ip2 = nn.Linear(dim_hidden, num_classes)
def forward(self, x):
x = self.prelu1_1(self.conv1_1(x))
x = self.prelu1_2(self.conv1_2(x))
x = F.max_pool2d(x, 2)
x = self.prelu2_1(self.conv2_1(x))
x = self.prelu2_2(self.conv2_2(x))
x = F.max_pool2d(x, 2)
x = self.prelu3_1(self.conv3_1(x))
x = self.prelu3_2(self.conv3_2(x))
x = F.max_pool2d(x, 2)
x = x.view(-1, 128 * 3 * 3)
ip1 = self.prelu_ip1(self.ip1(x))
ip2 = self.ip2(ip1)
return ip1, F.log_softmax(ip2, dim=1)
def get_inputs():
return [torch.rand([4, 1, 24, 24])]
def get_init_inputs():
return [[], {}]
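# Hedged usage sketch (illustrative): a 24x24 grayscale batch yields a 2-d
# embedding plus 10-way log-probabilities:
#   model = LeNetPP(dim_hidden=2, num_classes=10)
#   feats, log_probs = model(torch.rand(4, 1, 24, 24))
#   # feats.shape == (4, 2); log_probs.shape == (4, 10)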
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
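# Editor's note (hedged): the five kernels above are the same repack up to
# channel count; each copies a contiguous (C_out, C_in, 5, 5) conv weight
# into a channels-last layout (innermost stride on C_in) so the extern
# convolution calls in call() below can run in channels-last memory format.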
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 576
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 576 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + 32 * x2 + 18432 * y1), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, None)
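# Editor's note (hedged): elementwise PReLU with a single shared slope
# (in_ptr1 holds one element): out = x if x > 0 else a * x, the same as
# F.prelu(x, weight) for a 1-element weight tensor.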
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 12
x2 = xindex // 384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 1536 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 1536 * x2), None)
tmp3 = tl.load(in_ptr0 + (768 + x0 + 64 * x1 + 1536 * x2), None)
tmp5 = tl.load(in_ptr0 + (800 + x0 + 64 * x1 + 1536 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
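# Editor's note (hedged): a fused 2x2 max pool with indices on the
# channels-last activation: the four loads are one pooling window (right
# neighbor at +32, next row at +768/+800), out_ptr0 takes the max and
# out_ptr1 an int8 code in {0, 1, 2, 3} for which window position won,
# mirroring F.max_pool2d(x, 2, return_indices=True) up to index encoding.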
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 6
x2 = xindex // 384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1536 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (768 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (832 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 36
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y5 = yindex
y4 = yindex // 9
y6 = yindex % 9
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 1536 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (768 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (896 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 9 * x2 + 1152 * y4), tmp16, xmask & ymask)
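# Editor's note (hedged): the final pool also switches layout: the max
# values go to out_ptr1 with contiguous (1152, 9, 3, 1) strides so the
# (4, 128, 3, 3) result can be viewed as (4, 1152) for the ip1 addmm in
# call(), while out_ptr0 keeps the channels-last int8 argmax codes.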
@triton.jit
def triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused__log_softmax_14(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
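# Editor's note (hedged): a fused, numerically stable log_softmax over the
# 10 classes, one row per batch element (xnumel=4, rnumel=10):
# log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))).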
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_12, (64,), (1,))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (1,), (1,))
assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_18, (128,), (1,))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (2, 1152), (1152, 1))
assert_size_stride(primals_21, (2,), (1,))
assert_size_stride(primals_22, (1,), (1,))
assert_size_stride(primals_23, (10, 2), (2, 1))
assert_size_stride(primals_24, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 25)](primals_5, buf0, 1024, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_5
buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
triton_poi_fused_1[grid(2048, 25)](primals_8, buf1, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 25)](primals_11, buf2, 4096, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_11
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 25)](primals_14, buf3, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_14
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 25)](primals_17, buf4, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_17
buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1))
buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
torch.float32)
triton_poi_fused_convolution_5[grid(128, 576)](buf5, primals_2,
buf6, 128, 576, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0
)
del buf5
triton_poi_fused__prelu_kernel_6[grid(73728)](buf6, primals_4, buf7,
73728, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_7[grid(73728)](buf9,
primals_6, primals_7, buf10, 73728, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_6
buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32),
torch.float32)
buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(18432)](buf10,
buf11, buf12, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf14,
primals_9, primals_10, buf15, 36864, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64))
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf17,
primals_12, primals_13, buf18, 36864, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_12
buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
.float32)
buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(9216)](buf18,
buf19, buf20, 9216, XBLOCK=256, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128))
buf22 = buf21
del buf21
buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf22,
primals_15, primals_16, buf23, 18432, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128))
buf25 = buf24
del buf24
buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf25,
primals_18, primals_19, buf26, 18432, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_18
buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128),
torch.int8)
buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(36, 128)](buf26,
buf27, buf28, 36, 128, XBLOCK=4, YBLOCK=64, num_warps=4,
num_stages=1)
buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152
), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1,
1152), 0), alpha=1, beta=1, out=buf29)
del primals_21
buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused__prelu_kernel_13[grid(8)](buf29, primals_22, buf30,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_24, buf30, reinterpret_tensor(
primals_23, (2, 10), (1, 2), 0), alpha=1, beta=1, out=buf31)
del primals_24
buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_14[grid(4)](buf31, buf34, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf31
return (buf30, buf34, primals_1, primals_3, primals_4, buf0, primals_7,
buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4,
primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12,
buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25,
buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0),
buf29, buf30, buf34, primals_23, primals_20)
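# Editor's note (hedged): the first two returns are the user-visible outputs
# (buf30 = ip1 features, buf34 = log_softmax); the rest are activations and
# repacked weights that this '0_forward' graph saves for backward.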
class LeNetPPNew(nn.Module):
def __init__(self, dim_hidden=2, num_classes=10):
super(LeNetPPNew, self).__init__()
self.num_classes = num_classes
self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
self.prelu1_1 = nn.PReLU()
self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
self.prelu1_2 = nn.PReLU()
self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.prelu2_1 = nn.PReLU()
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
self.prelu2_2 = nn.PReLU()
self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
self.prelu3_1 = nn.PReLU()
self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
self.prelu3_2 = nn.PReLU()
self.prelu_ip1 = nn.PReLU()
self.ip1 = nn.Linear(128 * 3 * 3, dim_hidden)
self.ip2 = nn.Linear(dim_hidden, num_classes)
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.prelu1_1.weight
primals_5 = self.conv1_2.weight
primals_6 = self.conv1_2.bias
primals_7 = self.prelu1_2.weight
primals_8 = self.conv2_1.weight
primals_9 = self.conv2_1.bias
primals_10 = self.prelu2_1.weight
primals_11 = self.conv2_2.weight
primals_12 = self.conv2_2.bias
primals_13 = self.prelu2_2.weight
primals_14 = self.conv3_1.weight
primals_15 = self.conv3_1.bias
primals_16 = self.prelu3_1.weight
primals_17 = self.conv3_2.weight
primals_18 = self.conv3_2.bias
primals_19 = self.prelu3_2.weight
primals_22 = self.prelu_ip1.weight
primals_20 = self.ip1.weight
primals_21 = self.ip1.bias
primals_23 = self.ip2.weight
primals_24 = self.ip2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24])
return output[0], output[1]
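# Editor's note (hedged): LeNetPPNew is the Inductor-compiled drop-in for
# LeNetPP above; forward() gathers the module parameters as primals_* and
# delegates to the generated call(), returning (ip1_features, log_probs).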
| lyakaap/image-feature-learning-pytorch | LeNetPP | false | 16,004 | ["MIT"] | 55 | 241ed10d4312fedfb23015f6a50cdca8f2b0ad9e | https://github.com/lyakaap/image-feature-learning-pytorch/tree/241ed10d4312fedfb23015f6a50cdca8f2b0ad9e |
ScaledDotProductAttentionMemory | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xe/cxeuttfzx4xq2jmzwzvkech4crjirky5wjckb34lnep5o6sog3uw.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fn/cfnr6wn6wbusamhilcgctjberp7g5kksyakcze32k6ntswznc2de.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ka/ckaneo6wn23ipwgbubou64jdtwieswlrn7w7r7kqky4aagh3v6l3.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, att_1], Original ATen: [aten.sqrt, aten._softmax]
# Source node to ATen node mapping:
# att_1 => exp
# wrapped_sqrt => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_sqrt_2 = async_compile.triton('triton_poi_fused__softmax_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 2.0, tl.float64)
tmp2 = tl.full([1], 0.0, tl.float64)
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6.to(tl.float64)
tmp21 = tmp20 * tmp1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 / tmp22
tmp24 = tl_math.exp(tmp23)
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_4, buf3, 256, grid=grid(256), stream=stream0)
del primals_4
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_6, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_6
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wrapped_sqrt, att_1], Original ATen: [aten.sqrt, aten._softmax]
triton_poi_fused__softmax_sqrt_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf8, 256, grid=grid(256), stream=stream0)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf11)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
class ScaledDotProductAttentionMemory(nn.Module):
"""
Scaled dot-product attention with memory
"""
def __init__(self, d_model, d_k, d_v, h, m):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
:param m: Number of memory slots
"""
super(ScaledDotProductAttentionMemory, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, queries, keys, values, attention_mask=None,
attention_weights=None):
"""
        Computes scaled dot-product attention of the queries against the keys, weighting the values.
:param queries: Queries (b_s, nq, d_model)
:param keys: Keys (b_s, nk, d_model)
:param values: Values (b_s, nk, d_model)
:param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
:param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
        :return: Attention output (b_s, nq, d_model)
"""
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
1, 3)
att = torch.matmul(q, k) / np.sqrt(self.d_k)
if attention_weights is not None:
att = torch.cat([att[:, :, :, :nk] * attention_weights, att[:,
:, :, nk:]], -1)
if attention_mask is not None:
att[:, :, :, :nk] = att[:, :, :, :nk].masked_fill(attention_mask,
-np.inf)
att = torch.softmax(att, -1)
out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s,
nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4, 'm': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
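# Editor's note (hedged): clone_0 and clone_1 fuse the Linear bias add with
# the multi-head reshape: clone_0 writes q (and, reused below, v) as
# contiguous (b, h, n, d), while clone_1 writes k transposed as (b, h, d, n)
# so the following bmm computes q @ k^T per head.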
@triton.jit
def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 2.0, tl.float64)
tmp2 = tl.full([1], 0.0, tl.float64)
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6.to(tl.float64)
tmp21 = tmp20 * tmp1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 / tmp22
tmp24 = tl_math.exp(tmp23)
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
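# Editor's note (hedged): the two kernels above split a numerically stable
# scaled softmax over the last dim: _softmax_sqrt_2 computes
# exp((x - max(x)) / 2.0) with sqrt(d_k) = 2.0 folded in (the sign test on
# the constant keeps the max reduction valid for negative scales), and
# _softmax_3 divides by the row sum -- together softmax(x / sqrt(4), dim=-1).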
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_4, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, primals_6, buf4, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_sqrt_2[grid(256)](buf5, buf6, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_0[grid(256)](buf2, primals_8, buf8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0
), alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
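# Editor's note (hedged): the first return is the attention output reshaped
# to (4, 4, 4); the remaining tensors (flattened inputs, softmax buf7,
# pre-projection buf10, fc_o weight, transposed intermediates) appear to be
# saved for the backward pass of this '0_forward' graph.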
class ScaledDotProductAttentionMemoryNew(nn.Module):
"""
Scaled dot-product attention with memory
"""
def __init__(self, d_model, d_k, d_v, h, m):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
:param m: Number of memory slots
"""
super(ScaledDotProductAttentionMemoryNew, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.fc_q.weight
primals_4 = self.fc_q.bias
primals_5 = self.fc_k.weight
primals_6 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_10 = self.fc_o.weight
primals_11 = self.fc_o.bias
primals_1 = input_0
primals_2 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
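# Editor's note (hedged): as with LeNetPPNew above, this class is the
# Inductor-compiled drop-in: forward() feeds the linear weights/biases and
# the three inputs to the generated call() and returns only output[0],
# the attention result.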
| mandaltanmoy1938/VisualGPT | ScaledDotProductAttentionMemory | false | 16,005 | ["MIT"] | 86 | 9ba78948282fdca502d5030f4eccc3df562982c3 | https://github.com/mandaltanmoy1938/VisualGPT/tree/9ba78948282fdca502d5030f4eccc3df562982c3 |
TransformerEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
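# Together, the two kernels above form the standard two-pass, numerically
# stable softmax over the last dimension of the attention scores: pass one
# computes exp(x - rowmax), pass two divides by the row sum. A rough
# eager-mode sketch of the same computation (for reference only, not the
# traced graph) would be:
#
#   m = scores.amax(dim=-1, keepdim=True)
#   e = (scores - m).exp()
#   probs = e / e.sum(dim=-1, keepdim=True)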
# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
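# This copy kernel materializes the permute of the per-head attention
# output (effectively a plain 4x4 transpose here, since head_dim=1) into
# contiguous memory so the out-projection addmm below can read it as an
# ordinary (4, 4) matrix.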
# kernel path: runs/run_shard_0/inductor_cache/7m/c7my77j7miwq7j5yz26lhwtp4fyb6qiw2vuvksvbnxxhdrtuljuq.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# x => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
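# This kernel fuses the first residual add (x_in + attn_out) with the
# statistics pass of LayerNorm: for each of the 4 rows it computes the
# mean and the biased variance (correction=0, i.e. dividing by N=4, as
# torch.nn.LayerNorm does) over the 4 features. An eager sketch of the
# same statistics:
#
#   y = x_in + attn_out
#   mean = y.mean(dim=1, keepdim=True)
#   var = y.var(dim=1, unbiased=False, keepdim=True)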
# kernel path: runs/run_shard_0/inductor_cache/uy/cuyacfovgswdpyhlq2s2chxvljavfbdvz7wnuo2oaa6t6ewmxjgf.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# x => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
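# Second half of the fused add + LayerNorm: it recomputes the residual sum
# and applies (y - mean) * rsqrt(var + 1e-05) * weight + bias, matching
# torch.nn.LayerNorm with its default eps of 1e-05.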
# kernel path: runs/run_shard_0/inductor_cache/hp/chp5axid43qqadahqjo7e75btcycsuvzn5jw4a2wt3seu5og5huh.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3m/c3mh4ag5y7d2kfw4id5vjhn3zjt2ucu33pwtmgndlspt4gg5cawj.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
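# Fuses the second linear layer's bias add with the residual connection,
# computing x + (ff_out + bias) in place on the matmul output buffer.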
# kernel path: runs/run_shard_0/inductor_cache/5m/c5m2x4kwr66u6jzlkjcacrwhzqxhxsn3hv6ryzwol7bzp7uppnze.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
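# Statistics pass for the second LayerNorm. Unlike the first stats kernel,
# this one folds the eps add and the rsqrt into the reduction, storing the
# per-row mean and rsqrt(var + 1e-05) directly so the apply kernel below
# only needs a subtract, two multiplies and an add.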
# kernel path: runs/run_shard_0/inductor_cache/pg/cpgskb56mehof5k52uslszbldka4jbq52y6dhbe764xtjdj3lwxc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(primals_1, buf9, buf10, buf11, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf10, buf11, primals_6, primals_7, buf12, 16, grid=grid(16), stream=stream0)
del primals_7
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf14, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf16, buf12, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
buf17 = buf11; del buf11 # reuse
buf18 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf16, buf17, buf18, 4, grid=grid(4), stream=stream0)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 16, grid=grid(16), stream=stream0)
del buf17
del buf18
del primals_13
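    # Everything after buf19 in the tuple below appears to be returned for
    # the backward pass: this is the '0_forward' graph, so inductor hands
    # back saved activations and reinterpreted weight views for the
    # matching backward graph, alongside the layer output buf19.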
return (buf19, primals_1, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf14, buf16, primals_10, primals_8, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class TransformerEncoder(torch.nn.Module):
def __init__(self, embed_dim, num_heads, dropout, feedforward_dim):
super().__init__()
self.attn = torch.nn.MultiheadAttention(embed_dim, num_heads,
dropout=dropout)
self.linear_1 = torch.nn.Linear(embed_dim, feedforward_dim)
self.linear_2 = torch.nn.Linear(feedforward_dim, embed_dim)
self.layernorm_1 = torch.nn.LayerNorm(embed_dim)
self.layernorm_2 = torch.nn.LayerNorm(embed_dim)
def forward(self, x_in):
attn_out, _ = self.attn(x_in, x_in, x_in)
x = self.layernorm_1(x_in + attn_out)
ff_out = self.linear_2(torch.nn.functional.relu(self.linear_1(x)))
x = self.layernorm_2(x + ff_out)
return x
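# Example usage (a sketch; shapes follow get_inputs/get_init_inputs below).
# With a 2-D input, nn.MultiheadAttention treats the tensor as an unbatched
# (seq_len, embed_dim) sequence:
#
#   enc = TransformerEncoder(embed_dim=4, num_heads=4, dropout=0.5,
#                            feedforward_dim=4)
#   out = enc(torch.rand(4, 4))  # out.shape == (4, 4)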
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'dropout': 0.5,
'feedforward_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
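# The kernels below are the same fused kernels from the AOT dump above,
# reproduced without the async_compile wrappers and autotuning metadata;
# see the comments there for what each one computes.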
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(4)](primals_1, buf9,
buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (4, 4), (1,
4), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_6[grid(16)](buf14, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf15)
buf16 = buf15
del buf15
triton_poi_fused_add_7[grid(16)](buf16, buf12, primals_11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
buf17 = buf11
del buf11
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_8[grid(4)](buf16, buf17, buf18,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(16)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return (buf19, primals_1, primals_6, primals_12, buf6,
reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf14,
buf16, primals_10, primals_8, primals_4, reinterpret_tensor(buf2, (
4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class TransformerEncoderNew(torch.nn.Module):
def __init__(self, embed_dim, num_heads, dropout, feedforward_dim):
super().__init__()
self.attn = torch.nn.MultiheadAttention(embed_dim, num_heads,
dropout=dropout)
self.linear_1 = torch.nn.Linear(embed_dim, feedforward_dim)
self.linear_2 = torch.nn.Linear(feedforward_dim, embed_dim)
self.layernorm_1 = torch.nn.LayerNorm(embed_dim)
self.layernorm_2 = torch.nn.LayerNorm(embed_dim)
def forward(self, input_0):
        # Bind each parameter to the primals slot that call() expects, per
        # the assert_size_stride checks and kernel uses above.
        primals_2 = self.attn.in_proj_weight
        primals_3 = self.attn.in_proj_bias
        primals_4 = self.attn.out_proj.weight
        primals_5 = self.attn.out_proj.bias
        primals_8 = self.linear_1.weight
        primals_9 = self.linear_1.bias
        primals_10 = self.linear_2.weight
        primals_11 = self.linear_2.bias
        primals_6 = self.layernorm_1.weight
        primals_7 = self.layernorm_1.bias
        primals_12 = self.layernorm_2.weight
        primals_13 = self.layernorm_2.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| mamuncseru/Denoise-Transformer-AutoEncoder | TransformerEncoder | false | 16,006 | ["MIT"] | 265 | 56b3ff8b252ad24a4ed769158e3f0648090e1ffd | https://github.com/mamuncseru/Denoise-Transformer-AutoEncoder/tree/56b3ff8b252ad24a4ed769158e3f0648090e1ffd |
dice_bce_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/aw/cawz3vdu3wop7jhh2u65rssuvw54acfgrpbwcugsoe3s6yya6kjw.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, i, j, add_1, add_2, score, mean, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.mean, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# i => sum_1
# intersection => sum_3
# j => sum_2
# loss => sub_2
# mean => mean_1
# mul => mul_2
# mul_1 => mul_3
# score => div
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 0.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean_1), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp18 / tmp19
tmp21 = tmp19 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
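# The single persistent-reduction kernel above evaluates the entire soft
# dice loss, 1 - 2*sum(y_true*y_pred) / (sum(y_true) + sum(y_pred)) with
# smooth=0.0, across all 256 elements in one program. Note that the BCE
# term from the eager forward is absent: its value is never used, so the
# compiler eliminated it as dead code.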
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, i, j, add_1, add_2, score, mean, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.mean, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class dice_bce_loss(nn.Module):
def __init__(self, batch=True):
super(dice_bce_loss, self).__init__()
self.batch = batch
self.bce_loss = nn.BCELoss()
def soft_dice_coeff(self, y_true, y_pred):
smooth = 0.0
if self.batch:
i = torch.sum(y_true)
j = torch.sum(y_pred)
intersection = torch.sum(y_true * y_pred)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred):
loss = 1 - self.soft_dice_coeff(y_true, y_pred)
return loss
def forward(self, y_pred, y_true):
self.bce_loss(y_pred, y_true)
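        # Note: the BCE value computed above is discarded; only the soft
        # dice loss below contributes to the returned loss.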
b = self.soft_dice_loss(y_true, y_pred)
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp18 / tmp19
tmp21 = tmp19 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class dice_bce_lossNew(nn.Module):
def __init__(self, batch=True):
super(dice_bce_lossNew, self).__init__()
self.batch = batch
self.bce_loss = nn.BCELoss()
def soft_dice_coeff(self, y_true, y_pred):
smooth = 0.0
if self.batch:
i = torch.sum(y_true)
j = torch.sum(y_pred)
intersection = torch.sum(y_true * y_pred)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred):
loss = 1 - self.soft_dice_coeff(y_true, y_pred)
return loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| manuel-rdz/SGL-Retinal-Vessel-Segmentation | dice_bce_loss | false | 16,007 | [
"MIT"
] | 45 | 7897d977e77aa0b5d3acb86e0aa74c6829d67415 | https://github.com/manuel-rdz/SGL-Retinal-Vessel-Segmentation/tree/7897d977e77aa0b5d3acb86e0aa74c6829d67415 |
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/h7/ch7blp6x7o26nygj7ttg2cda3wtf7c3zmatnitqreblcmuie5nzj.py
# Topologically Sorted Source Nodes: [sub, abs_1, h_tv, truediv, sub_1, abs_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.abs, aten.sum, aten.div, aten.add, aten.mul]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# h_tv => sum_1
# mul => mul
# sub => sub
# sub_1 => sub_1
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# w_tv => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 12), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 12), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_abs_add_div_mul_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_mul_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 3
r3 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.08333333333333333
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 1.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, h_tv, truediv, sub_1, abs_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.abs, aten.sum, aten.div, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_mul_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class TVLoss(nn.Module):
def __init__(self, TVLoss_weight=1):
super(TVLoss, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = (x.size()[2] - 1) * x.size()[3]
count_w = x.size()[2] * (x.size()[3] - 1)
h_tv = torch.abs(x[:, :, 1:, :] - x[:, :, :h_x - 1, :]).sum()
w_tv = torch.abs(x[:, :, :, 1:] - x[:, :, :, :w_x - 1]).sum()
return self.TVLoss_weight * (h_tv / count_h + w_tv / count_w
) / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
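# A minimal usage sketch (hypothetical input; any 4-D float tensor works):
#   tv = TVLoss(TVLoss_weight=1)
#   penalty = tv(torch.rand(4, 4, 4, 4))  # 0-dim tensor: weighted anisotropic TV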
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
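# For the fixed (4, 4, 4, 4) input the kernel hard-codes the TVLoss constants:
# 0.08333... = 1/12 = 1/count_h = 1/count_w, 1.0 = TVLoss_weight, 0.25 = 1/batch_size.
# Both directional difference sums run over 192 = 4*4*3*4 elements in one pass.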
@triton.jit
def triton_per_fused_abs_add_div_mul_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.08333333333333333
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 1.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mul_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self, TVLoss_weight=1):
super(TVLossNew, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| manuel-rdz/SGL-Retinal-Vessel-Segmentation | TVLoss | false | 16,008 | [
"MIT"
] | 45 | 7897d977e77aa0b5d3acb86e0aa74c6829d67415 | https://github.com/manuel-rdz/SGL-Retinal-Vessel-Segmentation/tree/7897d977e77aa0b5d3acb86e0aa74c6829d67415 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xu/cxuxmhz7cepjp4vjwppki2dqyg7eikcdiuff36rdkcryhwu6d2ab.py
# Topologically Sorted Source Nodes: [present], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# present => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute_3, %permute_2],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 + x0 + (12*x1) + (48*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (8 + x0 + (12*x1) + (48*((-4) + x2))), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yp/cypnjebs6j7r2f6dhwegd3kxx7t6gutrxri4rbhiplk4ojfdcu4x.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# w => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/so/csofmscwslhuddhgtulfsbs555c5vsyfmaldv7a2zxknv6bef5m7.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# w => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s4/cs472yivvl3yzse325afzknsz7ua5dqrqzmwls3lwujk3hte6xkl.py
# Topologically Sorted Source Nodes: [w_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# w_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_9, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_9, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zh/czh6tw7ngffcygnivwvcjex5edxy3ms4t27ymyn2hemxlpspxzq7.py
# Topologically Sorted Source Nodes: [w_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# w_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/km/ckmapv33krv5kdz7mxm4bclabqsglvml3hn4pskmsy3wkclhu5fl.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# a => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4k/c4kxxzyxk45cygbwnqkt5vb2udxol67wuotkh6zmuwsinb63uprn.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_5 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((8, 4, 4, 1), (16, 1, 4, 16), torch.float32)
# Topologically Sorted Source Nodes: [present], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, buf1, 128, grid=grid(128), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf0, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [w_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf0, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf0
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf8, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_5, alpha=1, beta=1, out=buf10)
del primals_4
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (2, 4, 4, 4, 1), (64, 16, 1, 4, 4), 0), buf6, buf6, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
from torch.nn import Module
import math
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
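        # the next statement is dead code: its result is discarded before the addmm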
x.contiguous().view(-1, x.size(-1))
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
class Attention(Module):
def __init__(self, nx, n_ctx, config, scale=False, can_be_stateful=False):
super(Attention, self).__init__()
n_state = nx
assert n_state % config.n_head == 0
self.register_buffer('bias', torch.tril(torch.ones(n_ctx, n_ctx)).
view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.can_be_stateful = can_be_stateful
self.attn_pdrop = nn.Dropout(config.attn_pdrop)
if self.can_be_stateful:
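            # register_state/_is_stateful are not torch.nn.Module APIs; they are
            # presumably supplied by a custom stateful Module base in the source
            # repo and are only exercised when can_be_stateful=True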
self.register_state('running_keys', torch.zeros((12, 0, 64)))
self.register_state('running_values', torch.zeros((12, 0, 64)))
def _attn(self, q, k, v, mask_self_attention):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
if mask_self_attention is not None:
w = w.masked_fill(mask_self_attention, -10000.0)
w = nn.Softmax(dim=-1)(w)
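        # dropout is applied only to the stored copy (self.w); the matmul below
        # uses the pre-dropout attention weights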
self.w = self.attn_pdrop(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape)
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, layer_past=None, mask_self_attention=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if self.can_be_stateful and self._is_stateful:
self.running_keys = torch.cat([self.running_keys, key.transpose
(-2, -1)], -2)
key = self.running_keys.transpose(-2, -1)
self.running_values = torch.cat([self.running_values, value], -2)
value = self.running_values
present = torch.stack((key.transpose(-2, -1), value))
a = self._attn(query, key, value, mask_self_attention)
a = self.merge_heads(a)
a = self.c_proj(a)
return a, present
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'nx': 4, 'n_ctx': 4, 'config': _mock_config(n_head=4,
attn_pdrop=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
from torch import nn
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
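# c_attn projects to a (16, 12) buffer laid out as [query | key | value], four
# columns each. The stack kernel below gathers columns 4:8 (key) and 8:12
# (value) into the `present` cache; the clone kernels then split out the
# per-head query, key, and value slices.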
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 + x0 + 12 * x1 + 48 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 12 * x1 + 48 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
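# Numerically stable softmax in two passes: the first kernel subtracts the
# per-row max and exponentiates, the second normalizes by the per-row sum.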
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((8, 4, 4, 1), (16, 1, 4, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, buf2, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf0, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 4)](buf0, buf7, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_4, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), primals_5, alpha=1, beta=1, out=buf10)
del primals_4
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (2, 4, 4, 4, 1), (64, 16, 1, 4, 4), 0
), buf6, buf6, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor(
buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0)
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x.contiguous().view(-1, x.size(-1))
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
class AttentionNew(Module):
def __init__(self, nx, n_ctx, config, scale=False, can_be_stateful=False):
super(AttentionNew, self).__init__()
n_state = nx
assert n_state % config.n_head == 0
self.register_buffer('bias', torch.tril(torch.ones(n_ctx, n_ctx)).
view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.can_be_stateful = can_be_stateful
self.attn_pdrop = nn.Dropout(config.attn_pdrop)
if self.can_be_stateful:
self.register_state('running_keys', torch.zeros((12, 0, 64)))
self.register_state('running_values', torch.zeros((12, 0, 64)))
def _attn(self, q, k, v, mask_self_attention):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
if mask_self_attention is not None:
w = w.masked_fill(mask_self_attention, -10000.0)
w = nn.Softmax(dim=-1)(w)
self.w = self.attn_pdrop(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape)
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_3 = self.c_attn.weight
primals_2 = self.c_attn.bias
primals_5 = self.c_proj.weight
primals_4 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| mandaltanmoy1938/VisualGPT | Attention | false | 16,009 | [
"MIT"
] | 86 | 9ba78948282fdca502d5030f4eccc3df562982c3 | https://github.com/mandaltanmoy1938/VisualGPT/tree/9ba78948282fdca502d5030f4eccc3df562982c3 |
SoftmaxOutputLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py
# Topologically Sorted Source Nodes: [probs], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probs => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4f/c4fohylrkpmotjodbcxka53btdcdzxeig62kpjpo2l45ahnmgqpg.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 1), kwargs = {})
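# The kernel below fuses the softmax normalization (division by the row sum)
# with a 4-way argmax. The chained comparisons mirror torch.max semantics:
# NaNs propagate (via the x != x tests) and ties resolve to the lower index.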
triton_poi_fused_max_1 = async_compile.triton('triton_poi_fused_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = tmp1 / tmp6
tmp9 = tmp7 > tmp8
tmp10 = tmp7 == tmp8
tmp11 = tmp7 != tmp7
tmp12 = tmp8 != tmp8
tmp13 = tmp11 > tmp12
tmp14 = tmp9 | tmp13
tmp15 = tmp11 & tmp12
tmp16 = tmp10 | tmp15
tmp17 = tl.full([1], 0, tl.int64)
tmp18 = tl.full([1], 1, tl.int64)
tmp19 = tmp17 < tmp18
tmp20 = tmp16 & tmp19
tmp21 = tmp14 | tmp20
tmp22 = tl.where(tmp21, tmp7, tmp8)
tmp23 = tl.where(tmp21, tmp17, tmp18)
tmp24 = tmp3 / tmp6
tmp25 = tmp22 > tmp24
tmp26 = tmp22 == tmp24
tmp27 = tmp22 != tmp22
tmp28 = tmp24 != tmp24
tmp29 = tmp27 > tmp28
tmp30 = tmp25 | tmp29
tmp31 = tmp27 & tmp28
tmp32 = tmp26 | tmp31
tmp33 = tl.full([1], 2, tl.int64)
tmp34 = tmp23 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tmp30 | tmp35
tmp37 = tl.where(tmp36, tmp22, tmp24)
tmp38 = tl.where(tmp36, tmp23, tmp33)
tmp39 = tmp5 / tmp6
tmp40 = tmp37 > tmp39
tmp41 = tmp37 == tmp39
tmp42 = tmp37 != tmp37
tmp43 = tmp39 != tmp39
tmp44 = tmp42 > tmp43
tmp45 = tmp40 | tmp44
tmp46 = tmp42 & tmp43
tmp47 = tmp41 | tmp46
tmp48 = tl.full([1], 3, tl.int64)
tmp49 = tmp38 < tmp48
tmp50 = tmp47 & tmp49
tmp51 = tmp45 | tmp50
tmp52 = tl.where(tmp51, tmp37, tmp39)
tmp53 = tl.where(tmp51, tmp38, tmp48)
tl.store(out_ptr0 + (x0), tmp53, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(arg1_1, reinterpret_tensor(arg2_1, (64, 4), (4, 1), 0), reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del arg0_1
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
triton_poi_fused_max_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(self.__class__.__name__))
class SoftmaxOutputLayer(OutputLayer):
"""
    Implements a softmax-based output layer
"""
def forward(self, hidden):
logits = self.output_projection(hidden)
probs = F.softmax(logits, -1)
_, predictions = torch.max(probs, dim=-1)
return predictions
def loss(self, hidden, labels):
logits = self.output_projection(hidden)
log_probs = F.log_softmax(logits, -1)
        return F.nll_loss(log_probs.view(-1, self.output_size), labels.view(-1))
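# Illustrative usage of the eager module above (names chosen for exposition):
#   layer = SoftmaxOutputLayer(hidden_size=4, output_size=4)
#   preds = layer(torch.rand(2, 3, 4))  # int64 argmax indices, shape (2, 3)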
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
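    # Numerically stable softmax numerator: exp(x - row_max). The division by
    # the row sum is fused into triton_poi_fused_max_1 below.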
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
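    # Same fused normalize-then-argmax as in the kernel source string above;
    # see the NaN-aware compare/select chain in the body.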
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = tmp1 / tmp6
tmp9 = tmp7 > tmp8
tmp10 = tmp7 == tmp8
tmp11 = tmp7 != tmp7
tmp12 = tmp8 != tmp8
tmp13 = tmp11 > tmp12
tmp14 = tmp9 | tmp13
tmp15 = tmp11 & tmp12
tmp16 = tmp10 | tmp15
tmp17 = tl.full([1], 0, tl.int64)
tmp18 = tl.full([1], 1, tl.int64)
tmp19 = tmp17 < tmp18
tmp20 = tmp16 & tmp19
tmp21 = tmp14 | tmp20
tmp22 = tl.where(tmp21, tmp7, tmp8)
tmp23 = tl.where(tmp21, tmp17, tmp18)
tmp24 = tmp3 / tmp6
tmp25 = tmp22 > tmp24
tmp26 = tmp22 == tmp24
tmp27 = tmp22 != tmp22
tmp28 = tmp24 != tmp24
tmp29 = tmp27 > tmp28
tmp30 = tmp25 | tmp29
tmp31 = tmp27 & tmp28
tmp32 = tmp26 | tmp31
tmp33 = tl.full([1], 2, tl.int64)
tmp34 = tmp23 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tmp30 | tmp35
tmp37 = tl.where(tmp36, tmp22, tmp24)
tmp38 = tl.where(tmp36, tmp23, tmp33)
tmp39 = tmp5 / tmp6
tmp40 = tmp37 > tmp39
tmp41 = tmp37 == tmp39
tmp42 = tmp37 != tmp37
tmp43 = tmp39 != tmp39
tmp44 = tmp42 > tmp43
tmp45 = tmp40 | tmp44
tmp46 = tmp42 & tmp43
tmp47 = tmp41 | tmp46
tmp48 = tl.full([1], 3, tl.int64)
tmp49 = tmp38 < tmp48
tmp50 = tmp47 & tmp49
tmp51 = tmp45 | tmp50
tmp53 = tl.where(tmp51, tmp38, tmp48)
tl.store(out_ptr0 + x0, tmp53, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(arg1_1, reinterpret_tensor(arg2_1, (64, 4), (4,
1), 0), reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf0)
del arg0_1
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_max_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf2,
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(self.__class__.__name__))
class SoftmaxOutputLayerNew(OutputLayer):
"""
    Implements a softmax-based output layer
"""
def loss(self, hidden, labels):
logits = self.output_projection(hidden)
log_probs = F.log_softmax(logits, -1)
        return F.nll_loss(log_probs.view(-1, self.output_size), labels.view(-1))
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| markiewagner/torchnlp | SoftmaxOutputLayer | false | 16,010 | [
"Apache-2.0"
] | 262 | 92f0a98c7c2b407508810834cbfd544214481695 | https://github.com/markiewagner/torchnlp/tree/92f0a98c7c2b407508810834cbfd544214481695 |
SelfAttention | # AOT ID: ['1_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/eg/cegcfgu2nkbhmj5wg4jmovsw6ufrg35tchikhmkt324e44bhzuuw.py
# Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# queries_2 => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_10, 1.4142135623730951), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 4)) + (16*x2) + (64*(x1 // 4))), xmask)
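    # 0.7071067811865475 == 1 / 4 ** 0.25: the division by emb_size ** (1/4)
    # (emb_size = 4) is folded into a multiply by its reciprocal.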
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wgyuixdgs4q67lp3jznv3am6mg5w4566ld452z3mowdpdxq3zq.py
# Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# keys_2 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 1.4142135623730951), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x2 % 4)) + (16*x1) + (64*(x2 // 4))), xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dot_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dot_1 => div_2, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4h/c4hbvy7jxbw3dzzn56ed6w3oq5x3l5zczksk2h3ncwnjhu72g2m4.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_11 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%div, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_5 = async_compile.triton('triton_poi_fused_transpose_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf0, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [queries_2, dot], Original ATen: [aten.div, aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_5.run(buf3, buf11, 256, grid=grid(256), stream=stream0)
del buf3
return (reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), buf11, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SelfAttention(nn.Module):
def __init__(self, input_size, heads, embed_size):
super().__init__()
self.input_size = input_size
self.heads = heads
self.emb_size = embed_size
self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
def forward(self, x):
b, t, hin = x.size()
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
h = self.heads
e = self.emb_size
keys = self.tokeys(x).view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x).view(b, t, h, e)
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
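        # Scaling queries and keys each by e ** (1/4) scales their dot product
        # by sqrt(e) overall (the usual 1/sqrt(d) attention scaling), applied
        # before the bmm instead of after it.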
queries = queries / e ** (1 / 4)
keys = keys / e ** (1 / 4)
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b * h, t, t)
dot = F.softmax(dot, dim=2)
out = torch.bmm(dot, values).view(b, h, t, e)
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return out
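# Note: head outputs are concatenated, so with heads=4 and embed_size=4 the
# module maps (b, t, 4) -> (b, t, 16).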
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'heads': 4, 'embed_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 4) + 16 * x2 + 64 * (x1 // 4)),
xmask)
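    # 1 / 4 ** 0.25: division by emb_size ** (1/4) folded into a multiply.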
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 4) + 16 * x1 + 64 * (x2 // 4)),
xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
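    # Softmax pass 1: subtract the per-row max and exponentiate; the row-sum
    # division happens in triton_poi_fused__softmax_3.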
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
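    # Softmax pass 2: divide each exponentiated value by its row sum.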
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
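    # Materializes the (0, 2, 1) permutation of the scaled queries; this
    # buffer is among the tensors returned for the backward pass.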
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](buf1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_div_1[grid(256)](buf0, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16,
1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16,
4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0)
del buf9
triton_poi_fused_transpose_5[grid(256)](buf3, buf11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf3
return reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), buf11, buf4
class SelfAttentionNew(nn.Module):
def __init__(self, input_size, heads, embed_size):
super().__init__()
self.input_size = input_size
self.heads = heads
self.emb_size = embed_size
self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
bias=False)
def forward(self, input_0):
primals_2 = self.tokeys.weight
primals_3 = self.toqueries.weight
primals_4 = self.tovalues.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| mariuslindegaard/6.867_MARL_project | SelfAttention | false | 16,011 | [
"Apache-2.0"
] | 401 | 572b88b4d491db8a1673535868f4bf9aff58f73d | https://github.com/mariuslindegaard/6.867_MARL_project/tree/572b88b4d491db8a1673535868f4bf9aff58f73d |
ReDynamicWeightsCat33 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pp/cppzwhavrbxqanhenab3phph2xb4f22v2zltxf5ldtyeh2jp7igd.py
# Topologically Sorted Source Nodes: [dynamic_filter1_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dynamic_filter1_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
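    # Softmax over the 9 logits of each 3x3 dynamic filter: one row of 9 taps
    # per spatial location (64 rows = 4 batch elements x 16 positions).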
xnumel = 64
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((16*r1) + (144*(x0 // 16)) + (x0 % 16)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (9*x0)), tmp11, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yu/cyu5u75oqgplo4p6f33zgdot6wehnhque5kb2bng573l3nmqmq7d.py
# Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_5 => clone_9
# Graph fragment:
# %clone_9 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
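    # im2col-style gather: for every spatial position, collect its 3x3
    # neighborhood (padding 1, dilation 1); out-of-bounds taps are
    # zero-filled through the bounds mask on the load.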
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = (xindex // 9)
y1 = (yindex // 16)
x5 = xindex
y4 = yindex
tmp0 = (-1) + (x2 // 3) + (y0 // 4)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + (x2 % 3) + (y0 % 4)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + y0 + (4*(x2 // 3)) + (16*x3) + (64*y1) + (x2 % 3)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + (36*y4)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tu/ctu3q2v6br2z7sqttxn7xznelkr5wtvssaqglgf63ojgmmplxb3w.py
# Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_7 => clone_11
# Graph fragment:
# %clone_11 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_11,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
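    # Same 3x3 gather as triton_poi_fused_clone_1, with dilation 4 /
    # padding 4 (tap offsets of -4 + 4 * tap_index).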
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = (xindex // 9)
y1 = (yindex // 16)
x5 = xindex
y4 = yindex
tmp0 = (-4) + (4*(x2 // 3)) + (y0 // 4)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-4) + (4*(x2 % 3)) + (y0 % 4)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-20) + y0 + (4*(x2 % 3)) + (16*x3) + (16*(x2 // 3)) + (64*y1)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + (36*y4)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rg/crgqidobtxi4etomeqscr7velf6sv77ep3uojozmcmwo2sjdusjv.py
# Topologically Sorted Source Nodes: [contiguous_9], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_9 => clone_13
# Graph fragment:
# %clone_13 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
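    # As triton_poi_fused_clone_1, with dilation 8 / padding 8.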
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = (xindex // 9)
y1 = (yindex // 16)
x5 = xindex
y4 = yindex
tmp0 = (-8) + (8*(x2 // 3)) + (y0 // 4)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-8) + (8*(x2 % 3)) + (y0 % 4)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-40) + y0 + (8*(x2 % 3)) + (16*x3) + (32*(x2 // 3)) + (64*y1)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + (36*y4)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5m/c5mqcc6l7mnbdnzd3cktjcg5rg4j33rs2iotm7qotnhrvtlblx2a.py
# Topologically Sorted Source Nodes: [contiguous_11], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_11 => clone_15
# Graph fragment:
# %clone_15 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_15,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
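    # As triton_poi_fused_clone_1, with dilation 12 / padding 12.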
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = (xindex // 9)
y1 = (yindex // 16)
x5 = xindex
y4 = yindex
tmp0 = (-12) + (12*(x2 // 3)) + (y0 // 4)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-12) + (12*(x2 % 3)) + (y0 % 4)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-60) + y0 + (12*(x2 % 3)) + (16*x3) + (48*(x2 // 3)) + (64*y1)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + (36*y4)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7r/c7rukorz67iypawo5q2qpmqhsepre3uzqoeask53qmnm7ymbvdm4.py
# Topologically Sorted Source Nodes: [mul, add, mul_1, add_1, mul_2, add_2, mul_3, out], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add_8
# add_1 => add_9
# add_2 => add_10
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# out => add_11
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %view_26), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %view_29), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %view_32), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %view_35), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_3), kwargs = {})
triton_poi_fused_add_mul_5 = async_compile.triton('triton_poi_fused_add_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
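    # Weighted residual combine: out = x + w1*y1 + w2*y2 + w3*y3 + w4*y4,
    # where each w_i is a learnable scalar (primals_6..primals_9) and y_i is
    # the branch filtered at dilation 1 / 4 / 8 / 12.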
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK])
tmp3 = tl.load(in_ptr2 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, YBLOCK])
tmp8 = tl.load(in_ptr4 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr5 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, YBLOCK])
tmp13 = tl.load(in_ptr6 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr7 + (0))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, YBLOCK])
tmp18 = tl.load(in_ptr8 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tmp14 = tmp12 * tmp13
tmp15 = tmp10 + tmp14
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp20, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [dynamic_filter1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 9, 4, 4), (144, 16, 4, 1))
# Topologically Sorted Source Nodes: [dynamic_filter2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 9, 4, 4), (144, 16, 4, 1))
# Topologically Sorted Source Nodes: [dynamic_filter3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 9, 4, 4), (144, 16, 4, 1))
# Topologically Sorted Source Nodes: [dynamic_filter4], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_1, primals_5, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 9, 4, 4), (144, 16, 4, 1))
buf6 = empty_strided_cuda((64, 9), (9, 1), torch.float32)
# Topologically Sorted Source Nodes: [dynamic_filter1_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(buf0, buf6, 64, 9, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf0, (64, 9), (9, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [dynamic_filter2_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_0.run(buf1, buf9, 64, 9, grid=grid(64), stream=stream0)
buf12 = reinterpret_tensor(buf1, (64, 9), (9, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [dynamic_filter3_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_0.run(buf2, buf12, 64, 9, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf2, (64, 9), (9, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [dynamic_filter4_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_0.run(buf3, buf15, 64, 9, grid=grid(64), stream=stream0)
del buf3
buf16 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf16, 64, 36, grid=grid(64, 36), stream=stream0)
buf17 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf16, (64, 4, 9), (36, 9, 1), 0), reinterpret_tensor(buf6, (64, 9, 1), (9, 1, 1), 0), out=buf17)
buf18 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(primals_1, buf18, 64, 36, grid=grid(64, 36), stream=stream0)
buf19 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf18, (64, 4, 9), (36, 9, 1), 0), reinterpret_tensor(buf9, (64, 9, 1), (9, 1, 1), 0), out=buf19)
buf20 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_9], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(primals_1, buf20, 64, 36, grid=grid(64, 36), stream=stream0)
buf21 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf20, (64, 4, 9), (36, 9, 1), 0), reinterpret_tensor(buf12, (64, 9, 1), (9, 1, 1), 0), out=buf21)
buf22 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_11], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(primals_1, buf22, 64, 36, grid=grid(64, 36), stream=stream0)
buf23 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (64, 4, 9), (36, 9, 1), 0), reinterpret_tensor(buf15, (64, 9, 1), (9, 1, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, mul_1, add_1, mul_2, add_2, mul_3, out], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_5.run(primals_1, primals_6, buf17, primals_7, buf19, primals_8, buf21, primals_9, buf23, buf24, 64, 4, grid=grid(64, 4), stream=stream0)
return (buf24, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, buf6, buf9, buf12, buf15, buf17, buf19, buf21, buf23, reinterpret_tensor(buf22, (64, 9, 4), (36, 1, 9), 0), reinterpret_tensor(buf20, (64, 9, 4), (36, 1, 9), 0), reinterpret_tensor(buf18, (64, 9, 4), (36, 1, 9), 0), reinterpret_tensor(buf16, (64, 9, 4), (36, 1, 9), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((9, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((9, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((9, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((9, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
from torch import nn
from torch.nn.modules.utils import _pair
class DeformConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, deformable_groups=1, bias=False):
assert not bias
super(DeformConv, self).__init__()
self.with_bias = bias
        assert in_channels % groups == 0, 'in_channels {} must be divisible by groups {}'.format(
            in_channels, groups)
        assert out_channels % groups == 0, 'out_channels {} must be divisible by groups {}'.format(
            out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels //
self.groups, *self.kernel_size))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
    def forward(self, input, offset):
        # deform_conv is assumed to be supplied by the repo's deformable
        # convolution extension; it is not defined in this file.
        return deform_conv(input, offset, self.weight, self.stride, self.
            padding, self.dilation, self.groups, self.deformable_groups)
def __repr__(self):
return ''.join(['{}('.format(self.__class__.__name__),
'in_channels={}, '.format(self.in_channels),
'out_channels={}, '.format(self.out_channels),
'kernel_size={}, '.format(self.kernel_size), 'stride={}, '.
format(self.stride), 'dilation={}, '.format(self.dilation),
'padding={}, '.format(self.padding), 'groups={}, '.format(self.
groups), 'deformable_groups={}, '.format(self.deformable_groups
), 'bias={})'.format(self.with_bias)])
class DeformUnfold(nn.Module):
def __init__(self, kernel_size, stride=1, padding=0, dilation=1,
deformable_groups=1, bias=False):
assert not bias
super(DeformUnfold, self).__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.deformable_groups = deformable_groups
    def forward(self, input, offset):
        # deform_unfold is assumed to be supplied by the repo's deformable
        # convolution extension; it is not defined in this file.
        return deform_unfold(input, offset, self.kernel_size, self.stride,
            self.padding, self.dilation, self.deformable_groups)
def __repr__(self):
return ''.join(['{}('.format(self.__class__.__name__),
'kernel_size={}, '.format(self.kernel_size), 'stride={}, '.
format(self.stride), 'dilation={}, '.format(self.dilation),
'padding={}, '.format(self.padding), 'deformable_groups={}, '.
format(self.deformable_groups)])
class ReDynamicWeightsCat33(nn.Module):
"""' a rigrous implementation but slow
"""
def __init__(self, channels, group=1, kernel=3, dilation=(1, 4, 8, 12),
shuffle=False, deform=None):
super(ReDynamicWeightsCat33, self).__init__()
in_channel = channels
if deform == 'deformatt':
self.off_conva = nn.Conv2d(in_channel, 18, 3, padding=dilation[
0], dilation=dilation[0], bias=False)
self.off_convb = nn.Conv2d(in_channel, 18, 3, padding=dilation[
1], dilation=dilation[1], bias=False)
self.off_convc = nn.Conv2d(in_channel, 18, 3, padding=dilation[
2], dilation=dilation[2], bias=False)
self.off_convd = nn.Conv2d(in_channel, 18, 3, padding=dilation[
3], dilation=dilation[3], bias=False)
self.kernel_conva = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[0], dilation=
dilation[0], bias=False)
self.kernel_convb = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[1], dilation=
dilation[1], bias=False)
self.kernel_convc = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[2], dilation=
dilation[2], bias=False)
self.kernel_convd = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[3], dilation=
dilation[3], bias=False)
self.unfold1 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[0], dilation=dilation[0])
self.unfold2 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[1], dilation=dilation[1])
self.unfold3 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[2], dilation=dilation[2])
self.unfold4 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[3], dilation=dilation[3])
elif deform == 'deform':
self.off_conva = nn.Conv2d(in_channel, 18, 3, padding=dilation[
0], dilation=dilation[0], bias=False)
self.off_convb = nn.Conv2d(in_channel, 18, 3, padding=dilation[
1], dilation=dilation[1], bias=False)
self.off_convc = nn.Conv2d(in_channel, 18, 3, padding=dilation[
2], dilation=dilation[2], bias=False)
self.off_convd = nn.Conv2d(in_channel, 18, 3, padding=dilation[
3], dilation=dilation[3], bias=False)
self.kernel_conva = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[0], dilation=
dilation[0], bias=False)
self.kernel_convb = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[1], dilation=
dilation[1], bias=False)
self.kernel_convc = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[2], dilation=
dilation[2], bias=False)
self.kernel_convd = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[3], dilation=
dilation[3], bias=False)
self.unfold1 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[0], dilation=dilation[0])
self.unfold2 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[1], dilation=dilation[1])
self.unfold3 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[2], dilation=dilation[2])
self.unfold4 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[3], dilation=dilation[3])
else:
self.cata = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[0], dilation=dilation[0], bias=False)
self.catb = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[1], dilation=dilation[1], bias=False)
self.catc = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[2], dilation=dilation[2], bias=False)
self.catd = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[3], dilation=dilation[3], bias=False)
self.unfold1 = nn.Unfold(kernel_size=(3, 3), padding=dilation[0
], dilation=dilation[0])
self.unfold2 = nn.Unfold(kernel_size=(3, 3), padding=dilation[1
], dilation=dilation[1])
self.unfold3 = nn.Unfold(kernel_size=(3, 3), padding=dilation[2
], dilation=dilation[2])
self.unfold4 = nn.Unfold(kernel_size=(3, 3), padding=dilation[3
], dilation=dilation[3])
self.softmax = nn.Softmax(dim=-1)
self.shuffle = shuffle
self.deform = deform
self.group = group
self.K = kernel * kernel
self.gamma1 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma2 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma3 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma4 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
def forward(self, x):
blur_depth = x
N, C, H, W = x.size()
R = C // self.group
if self.deform == 'deformatt':
offset1 = self.off_conva(blur_depth)
offset2 = self.off_convb(blur_depth)
offset3 = self.off_convc(blur_depth)
offset4 = self.off_convd(blur_depth)
xd_unfold1 = self.unfold1(blur_depth, offset1)
xd_unfold2 = self.unfold2(blur_depth, offset2)
xd_unfold3 = self.unfold3(blur_depth, offset3)
xd_unfold4 = self.unfold4(blur_depth, offset4)
dynamic_filter_att1 = self.kernel_conva(blur_depth, offset1)
dynamic_filter_att2 = self.kernel_convb(blur_depth, offset2)
dynamic_filter_att3 = self.kernel_convc(blur_depth, offset3)
dynamic_filter_att4 = self.kernel_convd(blur_depth, offset4)
dynamic_filter1 = dynamic_filter_att1[:, :9 * self.group, :, :]
att1 = dynamic_filter_att1[:, -9:, :, :]
att1 = att1.view(N, -1, H * W).view(N, 1, -1, H * W).permute(0,
1, 3, 2).contiguous()
att1 = self.softmax(att1)
dynamic_filter2 = dynamic_filter_att2[:, :9 * self.group, :, :]
att2 = dynamic_filter_att2[:, -9:, :, :]
att2 = att2.view(N, -1, H * W).view(N, 1, -1, H * W).permute(0,
1, 3, 2).contiguous()
att2 = self.softmax(att2)
dynamic_filter3 = dynamic_filter_att3[:, :9 * self.group, :, :]
att3 = dynamic_filter_att3[:, -9:, :, :]
att3 = att3.view(N, -1, H * W).view(N, 1, -1, H * W).permute(0,
1, 3, 2).contiguous()
att3 = self.softmax(att3)
dynamic_filter4 = dynamic_filter_att4[:, :9 * self.group, :, :]
            att4 = dynamic_filter_att4[:, -9:, :, :]
att4 = att4.view(N, -1, H * W).view(N, 1, -1, H * W).permute(0,
1, 3, 2).contiguous()
att4 = self.softmax(att4)
elif self.deform == 'deform':
offset1 = self.off_conva(blur_depth)
offset2 = self.off_convb(blur_depth)
offset3 = self.off_convc(blur_depth)
offset4 = self.off_convd(blur_depth)
xd_unfold1 = self.unfold1(blur_depth, offset1)
xd_unfold2 = self.unfold2(blur_depth, offset2)
xd_unfold3 = self.unfold3(blur_depth, offset3)
xd_unfold4 = self.unfold4(blur_depth, offset4)
dynamic_filter1 = self.kernel_conva(blur_depth, offset1)
dynamic_filter2 = self.kernel_convb(blur_depth, offset2)
dynamic_filter3 = self.kernel_convc(blur_depth, offset3)
dynamic_filter4 = self.kernel_convd(blur_depth, offset4)
else:
dynamic_filter1 = self.cata(blur_depth)
dynamic_filter2 = self.catb(blur_depth)
dynamic_filter3 = self.catc(blur_depth)
dynamic_filter4 = self.catd(blur_depth)
xd_unfold1 = self.unfold1(blur_depth)
xd_unfold2 = self.unfold2(blur_depth)
xd_unfold3 = self.unfold3(blur_depth)
xd_unfold4 = self.unfold4(blur_depth)
if self.deform == 'deformatt':
dynamic_filter1 = dynamic_filter1.view(N, self.group, self.K, -1
).permute(0, 1, 3, 2).contiguous()
dynamic_filter2 = dynamic_filter2.view(N, self.group, self.K, -1
).permute(0, 1, 3, 2).contiguous()
dynamic_filter3 = dynamic_filter3.view(N, self.group, self.K, -1
).permute(0, 1, 3, 2).contiguous()
dynamic_filter4 = dynamic_filter4.view(N, self.group, self.K, -1
).permute(0, 1, 3, 2).contiguous()
else:
dynamic_filter1 = self.softmax(dynamic_filter1.view(N, self.
group, self.K, -1).permute(0, 1, 3, 2).contiguous().view(-1,
self.K))
dynamic_filter2 = self.softmax(dynamic_filter2.view(N, self.
group, self.K, -1).permute(0, 1, 3, 2).contiguous().view(-1,
self.K))
dynamic_filter3 = self.softmax(dynamic_filter3.view(N, self.
group, self.K, -1).permute(0, 1, 3, 2).contiguous().view(-1,
self.K))
dynamic_filter4 = self.softmax(dynamic_filter4.view(N, self.
group, self.K, -1).permute(0, 1, 3, 2).contiguous().view(-1,
self.K))
if self.training and self.shuffle:
dynamic_filter1 = dynamic_filter1.view(N, self.group, H * W, self.K
).permute(1, 0, 2, 3).contiguous()
idx1 = torch.randperm(self.group)
dynamic_filter1 = dynamic_filter1[idx1].permute(1, 0, 2, 3
).contiguous().view(-1, self.K)
dynamic_filter2 = dynamic_filter2.view(N, self.group, H * W, self.K
).permute(1, 0, 2, 3).contiguous()
idx2 = torch.randperm(self.group)
dynamic_filter2 = dynamic_filter2[idx2].permute(1, 0, 2, 3
).contiguous().view(-1, self.K)
dynamic_filter3 = dynamic_filter3.view(N, self.group, H * W, self.K
).permute(1, 0, 2, 3).contiguous()
idx3 = torch.randperm(self.group)
dynamic_filter3 = dynamic_filter3[idx3].permute(1, 0, 2, 3
).contiguous().view(-1, self.K)
dynamic_filter4 = dynamic_filter4.view(N, self.group, H * W, self.K
).permute(1, 0, 2, 3).contiguous()
idx4 = torch.randperm(self.group)
dynamic_filter4 = dynamic_filter4[idx4].permute(1, 0, 2, 3
).contiguous().view(-1, self.K)
if self.deform == 'deformatt':
dynamic_filter1 = dynamic_filter1 * att1
dynamic_filter2 = dynamic_filter2 * att2
dynamic_filter3 = dynamic_filter3 * att3
dynamic_filter4 = dynamic_filter4 * att4
dynamic_filter1 = dynamic_filter1.view(-1, self.K)
dynamic_filter2 = dynamic_filter2.view(-1, self.K)
dynamic_filter3 = dynamic_filter3.view(-1, self.K)
dynamic_filter4 = dynamic_filter4.view(-1, self.K)
xd_unfold1 = xd_unfold1.view(N, C, self.K, H * W).permute(0, 1, 3, 2
).contiguous().view(N, self.group, R, H * W, self.K).permute(0,
1, 3, 2, 4).contiguous().view(N * self.group * H * W, R, self.K)
xd_unfold2 = xd_unfold2.view(N, C, self.K, H * W).permute(0, 1, 3, 2
).contiguous().view(N, self.group, R, H * W, self.K).permute(0,
1, 3, 2, 4).contiguous().view(N * self.group * H * W, R, self.K)
xd_unfold3 = xd_unfold3.view(N, C, self.K, H * W).permute(0, 1, 3, 2
).contiguous().view(N, self.group, R, H * W, self.K).permute(0,
1, 3, 2, 4).contiguous().view(N * self.group * H * W, R, self.K)
xd_unfold4 = xd_unfold4.view(N, C, self.K, H * W).permute(0, 1, 3, 2
).contiguous().view(N, self.group, R, H * W, self.K).permute(0,
1, 3, 2, 4).contiguous().view(N * self.group * H * W, R, self.K)
out1 = torch.bmm(xd_unfold1, dynamic_filter1.unsqueeze(2))
out1 = out1.view(N, self.group, H * W, R).permute(0, 1, 3, 2
).contiguous().view(N, self.group * R, H * W).view(N, self.
group * R, H, W)
out2 = torch.bmm(xd_unfold2, dynamic_filter2.unsqueeze(2))
out2 = out2.view(N, self.group, H * W, R).permute(0, 1, 3, 2
).contiguous().view(N, self.group * R, H * W).view(N, self.
group * R, H, W)
out3 = torch.bmm(xd_unfold3, dynamic_filter3.unsqueeze(2))
out3 = out3.view(N, self.group, H * W, R).permute(0, 1, 3, 2
).contiguous().view(N, self.group * R, H * W).view(N, self.
group * R, H, W)
out4 = torch.bmm(xd_unfold4, dynamic_filter4.unsqueeze(2))
out4 = out4.view(N, self.group, H * W, R).permute(0, 1, 3, 2
).contiguous().view(N, self.group * R, H * W).view(N, self.
group * R, H, W)
out = (x + self.gamma1 * out1 + self.gamma2 * out2 + self.gamma3 *
out3 + self.gamma4 * out4)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.utils.data
from torch import nn
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * r1 + 144 * (x0 // 16) + x0 % 16), rmask &
xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 9 * x0), tmp11, rmask & xmask)
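# Reference sketch: triton_per_fused__softmax_0 above is a fused,
# numerically stable row-wise softmax over the K=9 filter taps of an
# (N, 9, H, W) conv output, flattened to (N*H*W, 9) rows. A minimal
# eager-mode equivalent, assuming the (4, 9, 4, 4) buffers used in call():
def softmax_reference(conv_out):
    N, K, H, W = conv_out.shape
    rows = conv_out.view(N, K, H * W).permute(0, 2, 1).reshape(-1, K)
    rows = rows - rows.max(dim=1, keepdim=True).values  # subtract row max
    rows = rows.exp()
    return rows / rows.sum(dim=1, keepdim=True)  # (N*H*W, K)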
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = xindex // 9
y1 = yindex // 16
x5 = xindex
y4 = yindex
tmp0 = -1 + x2 // 3 + y0 // 4
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x2 % 3 + y0 % 4
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + y0 + 4 * (x2 // 3) + 16 * x3 + 64 * y1 +
x2 % 3), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0
)
tl.store(out_ptr0 + (x5 + 36 * y4), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = xindex // 9
y1 = yindex // 16
x5 = xindex
y4 = yindex
tmp0 = -4 + 4 * (x2 // 3) + y0 // 4
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -4 + 4 * (x2 % 3) + y0 % 4
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-20 + y0 + 4 * (x2 % 3) + 16 * x3 + 16 * (x2 //
3) + 64 * y1), tmp10 & xmask & ymask, eviction_policy='evict_last',
other=0.0)
tl.store(out_ptr0 + (x5 + 36 * y4), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = xindex // 9
y1 = yindex // 16
x5 = xindex
y4 = yindex
tmp0 = -8 + 8 * (x2 // 3) + y0 // 4
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -8 + 8 * (x2 % 3) + y0 % 4
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-40 + y0 + 8 * (x2 % 3) + 16 * x3 + 32 * (x2 //
3) + 64 * y1), tmp10 & xmask & ymask, eviction_policy='evict_last',
other=0.0)
tl.store(out_ptr0 + (x5 + 36 * y4), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 9
y0 = yindex % 16
x3 = xindex // 9
y1 = yindex // 16
x5 = xindex
y4 = yindex
tmp0 = -12 + 12 * (x2 // 3) + y0 // 4
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -12 + 12 * (x2 % 3) + y0 % 4
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-60 + y0 + 12 * (x2 % 3) + 16 * x3 + 48 * (
x2 // 3) + 64 * y1), tmp10 & xmask & ymask, eviction_policy=
'evict_last', other=0.0)
tl.store(out_ptr0 + (x5 + 36 * y4), tmp11, xmask & ymask)
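# Reference sketch: the four clone kernels above materialize the same
# patches as nn.Unfold(kernel_size=3, padding=d, dilation=d) for
# d in (1, 4, 8, 12), rearranged so each (sample, group, pixel) row holds
# R channels by K=9 taps, ready for the bmm against the softmaxed filters.
# A minimal eager-mode equivalent, assuming group=1 as in call() below:
def unfold_reference(x, d, group=1):
    N, C, H, W = x.shape
    K, R = 9, C // group
    cols = torch.nn.functional.unfold(x, kernel_size=3, padding=d, dilation=d)
    return (cols.view(N, C, K, H * W).permute(0, 1, 3, 2).contiguous()
        .view(N, group, R, H * W, K).permute(0, 1, 3, 2, 4).contiguous()
        .view(N * group * H * W, R, K))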
@triton.jit
def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK])
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, YBLOCK])
tmp8 = tl.load(in_ptr4 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr5 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, YBLOCK])
tmp13 = tl.load(in_ptr6 + (x2 + 4 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp16 = tl.load(in_ptr7 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, YBLOCK])
tmp18 = tl.load(in_ptr8 + (x2 + 4 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tmp14 = tmp12 * tmp13
tmp15 = tmp10 + tmp14
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp20, xmask & ymask)
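# Reference sketch: triton_poi_fused_add_mul_5 fuses the final gated
# residual sum
#     out = x + gamma1*out1 + gamma2*out2 + gamma3*out3 + gamma4*out4
# where each out_i is an (N*H*W, C, 1) bmm result viewed back into x's
# layout. A minimal eager-mode equivalent under those shape assumptions:
def fused_add_mul_reference(x, gammas, bmm_outs):
    N, C, H, W = x.shape
    out = x
    for g, o in zip(gammas, bmm_outs):  # g: (1,), o: (N*H*W, C, 1)
        out = out + g * o.view(N, H * W, C).permute(0, 2, 1).reshape(N, C, H, W)
    return out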
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (9, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 9, 4, 4), (144, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 9, 4, 4), (144, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(8, 8), dilation=(8, 8), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 9, 4, 4), (144, 16, 4, 1))
buf3 = extern_kernels.convolution(primals_1, primals_5, stride=(1,
1), padding=(12, 12), dilation=(12, 12), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 9, 4, 4), (144, 16, 4, 1))
buf6 = empty_strided_cuda((64, 9), (9, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf0, buf6, 64, 9, XBLOCK=1,
num_warps=2, num_stages=1)
buf9 = reinterpret_tensor(buf0, (64, 9), (9, 1), 0)
del buf0
triton_per_fused__softmax_0[grid(64)](buf1, buf9, 64, 9, XBLOCK=1,
num_warps=2, num_stages=1)
buf12 = reinterpret_tensor(buf1, (64, 9), (9, 1), 0)
del buf1
triton_per_fused__softmax_0[grid(64)](buf2, buf12, 64, 9, XBLOCK=1,
num_warps=2, num_stages=1)
buf15 = reinterpret_tensor(buf2, (64, 9), (9, 1), 0)
del buf2
triton_per_fused__softmax_0[grid(64)](buf3, buf15, 64, 9, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
buf16 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1),
torch.float32)
triton_poi_fused_clone_1[grid(64, 36)](primals_1, buf16, 64, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (64, 4, 9), (36, 9, 1),
0), reinterpret_tensor(buf6, (64, 9, 1), (9, 1, 1), 0), out=buf17)
buf18 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1),
torch.float32)
triton_poi_fused_clone_2[grid(64, 36)](primals_1, buf18, 64, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf18, (64, 4, 9), (36, 9, 1),
0), reinterpret_tensor(buf9, (64, 9, 1), (9, 1, 1), 0), out=buf19)
buf20 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1),
torch.float32)
triton_poi_fused_clone_3[grid(64, 36)](primals_1, buf20, 64, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf20, (64, 4, 9), (36, 9, 1),
0), reinterpret_tensor(buf12, (64, 9, 1), (9, 1, 1), 0), out=buf21)
buf22 = empty_strided_cuda((4, 1, 16, 4, 9), (576, 1, 36, 9, 1),
torch.float32)
triton_poi_fused_clone_4[grid(64, 36)](primals_1, buf22, 64, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf22, (64, 4, 9), (36, 9, 1),
0), reinterpret_tensor(buf15, (64, 9, 1), (9, 1, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_5[grid(64, 4)](primals_1, primals_6, buf17,
primals_7, buf19, primals_8, buf21, primals_9, buf23, buf24, 64,
4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
return (buf24, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, buf6, buf9, buf12,
buf15, buf17, buf19, buf21, buf23, reinterpret_tensor(buf22, (64, 9,
4), (36, 1, 9), 0), reinterpret_tensor(buf20, (64, 9, 4), (36, 1, 9
), 0), reinterpret_tensor(buf18, (64, 9, 4), (36, 1, 9), 0),
reinterpret_tensor(buf16, (64, 9, 4), (36, 1, 9), 0))
class DeformConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, deformable_groups=1, bias=False):
assert not bias
super(DeformConv, self).__init__()
self.with_bias = bias
        assert in_channels % groups == 0, 'in_channels {} must be divisible by groups {}'.format(
            in_channels, groups)
        assert out_channels % groups == 0, 'out_channels {} must be divisible by groups {}'.format(
            out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels //
self.groups, *self.kernel_size))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input, offset):
return deform_conv(input, offset, self.weight, self.stride, self.
padding, self.dilation, self.groups, self.deformable_groups)
def __repr__(self):
return ''.join(['{}('.format(self.__class__.__name__),
'in_channels={}, '.format(self.in_channels),
'out_channels={}, '.format(self.out_channels),
'kernel_size={}, '.format(self.kernel_size), 'stride={}, '.
format(self.stride), 'dilation={}, '.format(self.dilation),
'padding={}, '.format(self.padding), 'groups={}, '.format(self.
groups), 'deformable_groups={}, '.format(self.deformable_groups
), 'bias={})'.format(self.with_bias)])
class DeformUnfold(nn.Module):
def __init__(self, kernel_size, stride=1, padding=0, dilation=1,
deformable_groups=1, bias=False):
assert not bias
super(DeformUnfold, self).__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.deformable_groups = deformable_groups
def forward(self, input, offset):
return deform_unfold(input, offset, self.kernel_size, self.stride,
self.padding, self.dilation, self.deformable_groups)
def __repr__(self):
return ''.join(['{}('.format(self.__class__.__name__),
'kernel_size={}, '.format(self.kernel_size), 'stride={}, '.
format(self.stride), 'dilation={}, '.format(self.dilation),
'padding={}, '.format(self.padding), 'deformable_groups={}, '.
format(self.deformable_groups)])
class ReDynamicWeightsCat33New(nn.Module):
"""' a rigrous implementation but slow
"""
def __init__(self, channels, group=1, kernel=3, dilation=(1, 4, 8, 12),
shuffle=False, deform=None):
super(ReDynamicWeightsCat33New, self).__init__()
in_channel = channels
if deform == 'deformatt':
self.off_conva = nn.Conv2d(in_channel, 18, 3, padding=dilation[
0], dilation=dilation[0], bias=False)
self.off_convb = nn.Conv2d(in_channel, 18, 3, padding=dilation[
1], dilation=dilation[1], bias=False)
self.off_convc = nn.Conv2d(in_channel, 18, 3, padding=dilation[
2], dilation=dilation[2], bias=False)
self.off_convd = nn.Conv2d(in_channel, 18, 3, padding=dilation[
3], dilation=dilation[3], bias=False)
self.kernel_conva = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[0], dilation=
dilation[0], bias=False)
self.kernel_convb = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[1], dilation=
dilation[1], bias=False)
self.kernel_convc = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[2], dilation=
dilation[2], bias=False)
self.kernel_convd = DeformConv(in_channel, group * kernel *
kernel + 9, kernel_size=3, padding=dilation[3], dilation=
dilation[3], bias=False)
self.unfold1 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[0], dilation=dilation[0])
self.unfold2 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[1], dilation=dilation[1])
self.unfold3 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[2], dilation=dilation[2])
self.unfold4 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[3], dilation=dilation[3])
elif deform == 'deform':
self.off_conva = nn.Conv2d(in_channel, 18, 3, padding=dilation[
0], dilation=dilation[0], bias=False)
self.off_convb = nn.Conv2d(in_channel, 18, 3, padding=dilation[
1], dilation=dilation[1], bias=False)
self.off_convc = nn.Conv2d(in_channel, 18, 3, padding=dilation[
2], dilation=dilation[2], bias=False)
self.off_convd = nn.Conv2d(in_channel, 18, 3, padding=dilation[
3], dilation=dilation[3], bias=False)
self.kernel_conva = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[0], dilation=
dilation[0], bias=False)
self.kernel_convb = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[1], dilation=
dilation[1], bias=False)
self.kernel_convc = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[2], dilation=
dilation[2], bias=False)
self.kernel_convd = DeformConv(in_channel, group * kernel *
kernel, kernel_size=3, padding=dilation[3], dilation=
dilation[3], bias=False)
self.unfold1 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[0], dilation=dilation[0])
self.unfold2 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[1], dilation=dilation[1])
self.unfold3 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[2], dilation=dilation[2])
self.unfold4 = DeformUnfold(kernel_size=(3, 3), padding=
dilation[3], dilation=dilation[3])
else:
self.cata = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[0], dilation=dilation[0], bias=False)
self.catb = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[1], dilation=dilation[1], bias=False)
self.catc = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[2], dilation=dilation[2], bias=False)
self.catd = nn.Conv2d(in_channel, group * kernel * kernel, 3,
padding=dilation[3], dilation=dilation[3], bias=False)
self.unfold1 = nn.Unfold(kernel_size=(3, 3), padding=dilation[0
], dilation=dilation[0])
self.unfold2 = nn.Unfold(kernel_size=(3, 3), padding=dilation[1
], dilation=dilation[1])
self.unfold3 = nn.Unfold(kernel_size=(3, 3), padding=dilation[2
], dilation=dilation[2])
self.unfold4 = nn.Unfold(kernel_size=(3, 3), padding=dilation[3
], dilation=dilation[3])
self.softmax = nn.Softmax(dim=-1)
self.shuffle = shuffle
self.deform = deform
self.group = group
self.K = kernel * kernel
self.gamma1 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma2 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma3 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
self.gamma4 = nn.Parameter(torch.FloatTensor(1).fill_(1.0))
def forward(self, input_0):
primals_6 = self.gamma1
primals_7 = self.gamma2
primals_8 = self.gamma3
primals_9 = self.gamma4
primals_2 = self.cata.weight
primals_3 = self.catb.weight
primals_4 = self.catc.weight
primals_5 = self.catd.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
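# Hypothetical usage sketch (CUDA required; shapes follow the (4, 4, 4, 4)
# input asserted in call() above):
#     model = ReDynamicWeightsCat33New(channels=4).cuda()
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     out = model(x)  # matches the eager ReDynamicWeightsCat33 forward
#     assert out.shape == x.shape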
| lzrobots/dgmn | ReDynamicWeightsCat33 | false | 16,012 | ["MIT"] | 54 | 515476b5c6a07dcc3b7a4d2243c541377624bb33 | https://github.com/lzrobots/dgmn/tree/515476b5c6a07dcc3b7a4d2243c541377624bb33 |
SoftConvNotLearnedMask | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p3/cp3qleddjiuuytozrtebx5pzf2ycpwtw4mkq2jsx7qqswymv2bm6.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/le/clejzat6tlw5t2epnjhg2xo7xgkver7rs577ru5w7f4woetce22s.py
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
# Source node to ATen node mapping:
# abs_1 => abs_1
# Graph fragment:
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%primals_3,), kwargs = {})
triton_poi_fused_abs_2 = async_compile.triton('triton_poi_fused_abs_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2n/c2nqbixogoir2mqjvloq4y2nz3mixdxtu7bk6e5p3qh6al3tullz.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# k => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_sum_3 = async_compile.triton('triton_per_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mm/cmmnxzwbuonyoez6on5euzvzinxgivmne4egsrsut343rvm5svhv.py
# Topologically Sorted Source Nodes: [norm, add, new_mask], Original ATen: [aten.repeat, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# new_mask => div
# norm => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_2, [4, 1, 1, 1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_1, %add), kwargs = {})
triton_poi_fused_add_div_repeat_4 = async_compile.triton('triton_poi_fused_add_div_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_repeat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_repeat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
triton_poi_fused_abs_2.run(primals_3, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.sum]
triton_per_fused_sum_3.run(buf3, buf4, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_2, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, add, new_mask], Original ATen: [aten.repeat, aten.add, aten.div]
triton_poi_fused_add_div_repeat_4.run(buf5, buf4, buf6, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [], Original ATen: []
buf7 = torch.ops.aten.set_.source_Tensor(primals_5, buf3)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
del buf4
del buf5
del primals_5
return (buf2, buf6, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class SoftConvNotLearnedMask(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, 1, bias)
self.mask_update_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, 1, False)
self.input_conv.apply(weights_init('xavier'))
def forward(self, input, mask):
output = self.input_conv(input * mask)
with torch.no_grad():
self.mask_update_conv.weight = torch.nn.Parameter(self.
input_conv.weight.abs())
filters, _, _, _ = self.mask_update_conv.weight.shape
k = self.mask_update_conv.weight.view((filters, -1)).sum(1)
norm = k.view(1, -1, 1, 1).repeat(mask.shape[0], 1, 1, 1)
new_mask = self.mask_update_conv(mask) / (norm + 1e-06)
return output, new_mask
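# Hypothetical usage sketch: the layer takes an image and a soft validity
# mask and returns the features of the masked image together with the
# renormalized mask (shapes follow get_inputs below):
#     layer = SoftConvNotLearnedMask(4, 4, kernel_size=4)
#     image, mask = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#     output, new_mask = layer(image, mask)  # each of shape (4, 4, 1, 1)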
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_abs_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
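# Note: this persistent reduction computes k = |W|.view(filters, -1).sum(1);
# each of the 4 programs sums the 64 = 4*4*4 absolute weights of one filter.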
@triton.jit
def triton_poi_fused_add_div_repeat_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
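# Note: this kernel realizes new_mask = conv(mask) / (norm + 1e-06); x0 picks
# the output channel, so each (batch, channel) value is divided by that
# channel's weight sum k plus the 1e-06 stabilizer.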
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_abs_2[grid(256)](primals_3, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_sum_3[grid(4)](buf3, buf4, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_2, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_add_div_repeat_4[grid(16)](buf5, buf4, buf6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf7 = torch.ops.aten.set_.source_Tensor(primals_5, buf3)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
del buf4
del buf5
del primals_5
return buf2, buf6, primals_3, buf0
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class SoftConvNotLearnedMaskNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, 1, bias)
self.mask_update_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, 1, False)
self.input_conv.apply(weights_init('xavier'))
def forward(self, input_0, input_1):
primals_1 = self.input_conv.weight
primals_4 = self.input_conv.bias
primals_2 = self.mask_update_conv.weight
primals_3 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| marcelsan/Deep-HdrReconstruction | SoftConvNotLearnedMask | false | 16,013 | [
"BSD-3-Clause"
] | 80 | 7cb0d93938baa6fbe029116451a661c18dfba49e | https://github.com/marcelsan/Deep-HdrReconstruction/tree/7cb0d93938baa6fbe029116451a661c18dfba49e |
penalty_bce_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/27/c27cshavkwz6vyqehmnbt6fgbmbszwgylegw5dcebc2xw3u2lq3b.py
# Topologically Sorted Source Nodes: [neg, add, log, mul, sub, sub_1, add_1, log_1, mul_1, bce, bce_1, sum_1, bce_2], Original ATen: [aten.neg, aten.add, aten.log, aten.mul, aten.rsub, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# bce => sub_2
# bce_1 => mul_2
# bce_2 => div
# log => log
# log_1 => log_1
# mul => mul
# mul_1 => mul_1
# neg => neg
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-14), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %log), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-14), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {})
triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp14 = tl.load(in_ptr2 + (r0), None)
tmp1 = -tmp0
tmp3 = 1e-14
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp0
tmp9 = tmp7 - tmp2
tmp10 = tmp9 + tmp3
tmp11 = tl_math.log(tmp10)
tmp12 = tmp8 * tmp11
tmp13 = tmp6 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 0.00390625
tmp20 = tmp18 * tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, add, log, mul, sub, sub_1, add_1, log_1, mul_1, bce, bce_1, sum_1, bce_2], Original ATen: [aten.neg, aten.add, aten.log, aten.mul, aten.rsub, aten.sub, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0.run(buf1, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class penalty_bce_loss(nn.Module):
def __init__(self):
super(penalty_bce_loss, self).__init__()
def forward(self, y_pred, y_true, pmap):
B, C, W, H = y_pred.size()
bce = -y_true * torch.log(y_pred + 1e-14) - (1 - y_true) * torch.log(
1 - y_pred + 1e-14)
bce = bce * pmap
bce = torch.sum(bce) / (B * C * W * H)
return bce
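# Note: per element this is the numerically stabilized binary cross-entropy
#     bce = -y * log(p + 1e-14) - (1 - y) * log(1 - p + 1e-14),
# weighted by the penalty map pmap and averaged over all B*C*W*H elements.
# For the (4, 4, 4, 4) inputs from get_inputs() the divisor is 256, matching
# the 0.00390625 (= 1/256) constant in the compiled kernel above.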
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp14 = tl.load(in_ptr2 + r0, None)
tmp1 = -tmp0
tmp3 = 1e-14
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp0
tmp9 = tmp7 - tmp2
tmp10 = tmp9 + tmp3
tmp11 = tl_math.log(tmp10)
tmp12 = tmp8 * tmp11
tmp13 = tmp6 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 0.00390625
tmp20 = tmp18 * tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
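# Note: the 0.00390625 constant above is 1/256, the B*C*W*H divisor baked in
# for the fixed (4, 4, 4, 4) shape, so the kernel returns mean(bce * pmap)
# in a single fused reduction.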
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0[grid(1)](buf1,
arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class penalty_bce_lossNew(nn.Module):
def __init__(self):
super(penalty_bce_lossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
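# Minimal usage sketch, assuming a CUDA device and the exact (4, 4, 4, 4)
# float32 shapes that call() asserts:
def _demo_penalty_bce_loss():
    loss_fn = penalty_bce_lossNew()
    y_pred = torch.rand(4, 4, 4, 4, device='cuda')
    y_true = torch.rand(4, 4, 4, 4, device='cuda')
    pmap = torch.ones(4, 4, 4, 4, device='cuda')
    return loss_fn(y_pred, y_true, pmap)  # scalar pmap-weighted BCE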
| manuel-rdz/SGL-Retinal-Vessel-Segmentation | penalty_bce_loss | false | 16,014 | [
"MIT"
] | 45 | 7897d977e77aa0b5d3acb86e0aa74c6829d67415 | https://github.com/manuel-rdz/SGL-Retinal-Vessel-Segmentation/tree/7897d977e77aa0b5d3acb86e0aa74c6829d67415 |
PCBActiv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p3/cp3qleddjiuuytozrtebx5pzf2ycpwtw4mkq2jsx7qqswymv2bm6.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bs/cbse4nrexqi2zskengrwepvzgn4r5txl6bxkfk62vqwxipzgli3p.py
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
# Source node to ATen node mapping:
# abs_1 => abs_1
# Graph fragment:
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%primals_3,), kwargs = {})
triton_poi_fused_abs_1 = async_compile.triton('triton_poi_fused_abs_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wq/cwq62r2pf5qkntfvln73l5nh23chkre3bpzcmjdltcven4g3psvr.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# k => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_sum_2 = async_compile.triton('triton_per_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (36*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/eg/cegtplacmgyvtdjvrktbtfioxnod3otbaobvfskhphlacnekwevz.py
# Topologically Sorted Source Nodes: [norm, add, new_mask], Original ATen: [aten.repeat, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# new_mask => div
# norm => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_2, [4, 1, 1, 1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_1, %add), kwargs = {})
triton_poi_fused_add_div_repeat_3 = async_compile.triton('triton_poi_fused_add_div_repeat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uk/cuksa57ow4msvzqmh6rcfm45f35yxtl2zdm2melyp4zye2akg3c7.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
triton_poi_fused_abs_1.run(primals_3, buf2, 144, grid=grid(144), stream=stream0)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.sum]
triton_per_fused_sum_2.run(buf2, buf3, 4, 36, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_2, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
del primals_2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, add, new_mask], Original ATen: [aten.repeat, aten.add, aten.div]
triton_poi_fused_add_div_repeat_3.run(buf4, buf3, buf5, 256, grid=grid(256), stream=stream0)
buf6 = buf1; del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [], Original ATen: []
buf8 = torch.ops.aten.set_.source_Tensor(primals_4, buf2)
assert_size_stride(buf8, (4, 4, 3, 3), (36, 9, 3, 1))
del buf3
del buf4
del primals_4
return (buf6, buf5, primals_3, buf0, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class SoftConvNotLearnedMask(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, 1, bias)
self.mask_update_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, 1, False)
self.input_conv.apply(weights_init('xavier'))
def forward(self, input, mask):
output = self.input_conv(input * mask)
with torch.no_grad():
self.mask_update_conv.weight = torch.nn.Parameter(self.
input_conv.weight.abs())
filters, _, _, _ = self.mask_update_conv.weight.shape
k = self.mask_update_conv.weight.view((filters, -1)).sum(1)
norm = k.view(1, -1, 1, 1).repeat(mask.shape[0], 1, 1, 1)
new_mask = self.mask_update_conv(mask) / (norm + 1e-06)
return output, new_mask
class PCBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ=
'relu', conv_bias=False):
super().__init__()
if sample == 'down-5':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 5, 2, 2, bias
=conv_bias)
elif sample == 'down-7':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 7, 2, 3, bias
=conv_bias)
elif sample == 'down-3':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 3, 2, 1, bias
=conv_bias)
else:
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 3, 1, 1, bias
=conv_bias)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, input_mask):
h, h_mask = self.conv(input, input_mask)
if hasattr(self, 'activation'):
h = self.activation(h)
return h, h_mask
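# Note: the `sample` flag maps to (kernel, stride, padding) triples --
# 'down-5' -> (5, 2, 2), 'down-7' -> (7, 2, 3), 'down-3' -> (3, 2, 1) --
# and any other value selects the stride-1 'same' convolution (3, 1, 1)
# used by the compiled graph above.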
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_abs_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused_sum_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_repeat_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = 1e-06
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
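# Note: this kernel fuses the forward ReLU with the mask needed for its
# backward: in_out_ptr0 is rewritten in place with max(x, 0), while out_ptr0
# records (relu(x) <= 0), the positions threshold_backward later zeroes.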
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_abs_1[grid(144)](primals_3, buf2, 144, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_sum_2[grid(4)](buf2, buf3, 4, 36, XBLOCK=1,
num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(primals_2, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
del primals_2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_repeat_3[grid(256)](buf4, buf3, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf6 = buf1
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(256)](buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = torch.ops.aten.set_.source_Tensor(primals_4, buf2)
assert_size_stride(buf8, (4, 4, 3, 3), (36, 9, 3, 1))
del buf3
del buf4
del primals_4
return buf6, buf5, primals_3, buf0, buf7
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class SoftConvNotLearnedMask(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, 1, bias)
self.mask_update_conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride, padding, dilation, 1, False)
self.input_conv.apply(weights_init('xavier'))
def forward(self, input, mask):
output = self.input_conv(input * mask)
with torch.no_grad():
self.mask_update_conv.weight = torch.nn.Parameter(self.
input_conv.weight.abs())
filters, _, _, _ = self.mask_update_conv.weight.shape
k = self.mask_update_conv.weight.view((filters, -1)).sum(1)
norm = k.view(1, -1, 1, 1).repeat(mask.shape[0], 1, 1, 1)
new_mask = self.mask_update_conv(mask) / (norm + 1e-06)
return output, new_mask
class PCBActivNew(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ=
'relu', conv_bias=False):
super().__init__()
if sample == 'down-5':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 5, 2, 2, bias
=conv_bias)
elif sample == 'down-7':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 7, 2, 3, bias
=conv_bias)
elif sample == 'down-3':
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 3, 2, 1, bias
=conv_bias)
else:
self.conv = SoftConvNotLearnedMask(in_ch, out_ch, 3, 1, 1, bias
=conv_bias)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input_0, input_1):
primals_3 = self.conv.input_conv.weight
primals_4 = self.conv.mask_update_conv.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
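# Minimal usage sketch, assuming a CUDA device and the (4, 4, 4, 4) float32
# activations that call() asserts for the default 'none-3' configuration:
def _demo_pcb_activ():
    layer = PCBActivNew(4, 4).cuda()
    image = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, 4, 4, device='cuda')
    return layer(image, mask)  # (ReLU-activated features, updated mask)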
| marcelsan/Deep-HdrReconstruction | PCBActiv | false | 16,015 | [
"BSD-3-Clause"
] | 80 | 7cb0d93938baa6fbe029116451a661c18dfba49e | https://github.com/marcelsan/Deep-HdrReconstruction/tree/7cb0d93938baa6fbe029116451a661c18dfba49e |
L0Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7t/c7tazjk5fa37gianh6dfhm42iddyawyqaiqbreo25suzz7ceeg4d.py
# Topologically Sorted Source Nodes: [sub, errors, max_1, mean], Original ATen: [aten.sub, aten.abs, aten.max, aten.mean]
# Source node to ATen node mapping:
# errors => abs_1
# max_1 => max_1
# mean => mean
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%abs_1, -1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {})
triton_per_fused_abs_max_mean_sub_0 = async_compile.triton('triton_per_fused_abs_max_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_max_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_max_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp22 = 64.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, errors, max_1, mean], Original ATen: [aten.sub, aten.abs, aten.max, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_max_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class L0Loss(torch.nn.Module):
def forward(self, suggested, target):
errors = (suggested - target).abs()
return torch.max(errors, dim=-1)[0].mean()
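# Worked example: despite the name, this is the mean over rows of the
# L-infinity norm of the error along the last axis.
def _demo_l0_loss():
    suggested = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    target = torch.tensor([[1.5, 2.0], [3.0, 2.0]])
    # per-row max abs errors are [0.5, 2.0], so the loss is 1.25
    return L0Loss()(suggested, target)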
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_max_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp22 = 64.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
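# Note: for the fixed (4, 4, 4, 4) input the last axis has length 4, so the
# kernel unrolls the max over four strided loads per row and then averages
# the 64 = 4*4*4 row maxima (hence the division by tmp22 = 64.0).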
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_max_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L0LossNew(torch.nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| martius-lab/CombOptNet | L0Loss | false | 16,016 | [
"MIT"
] | 46 | d563d31a95dce35a365d50b81f932c27531ae09b | https://github.com/martius-lab/CombOptNet/tree/d563d31a95dce35a365d50b81f932c27531ae09b |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/22/c2243krwvvvf4r4yarww4z2i5qpn4ituopmbv2ri27owefyawe3a.py
# Topologically Sorted Source Nodes: [add, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# relu => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 256
x0 = xindex % 4
x3 = (xindex // 256)
x6 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x5), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(out_ptr0 + (x4), tmp8, xmask)
tl.store(out_ptr1 + (x4), tmp10, xmask)
''', device_str='cuda')
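# Note: this kernel fuses both bias adds, the broadcast sum of the two
# projected inputs (view_1 + unsqueeze in the graph fragment), and the ReLU,
# and also emits the (relu <= 0) mask that threshold_backward consumes.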
# kernel path: runs/run_shard_0/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/df/cdf4yunxzwhg2apeu35u7judmlotrbwvwododu4qvc6xrng7w2yb.py
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attention_weighted_encoding => sum_2
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = (xindex // 4) % 16
x3 = (xindex // 256)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + (x5), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, relu], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0.run(buf0, primals_2, buf1, primals_5, buf2, buf8, 1024, grid=grid(1024), stream=stream0)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, attention_weighted_encoding], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(primals_3, buf6, buf7, 1024, grid=grid(1024), stream=stream0)
return (buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (256, 4), (4, 1), 0), buf6, primals_7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Attention(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(Attention, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out)
att2 = self.decoder_att(decoder_hidden)
att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2)
alpha = self.softmax(att)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(
dim=1)
return attention_weighted_encoding, alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
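# Editor's usage sketch (not part of the original record): a minimal CPU smoke
# test wiring get_init_inputs()/get_inputs() into the module above. The 4x4x4x4
# tensors are synthetic shapes, not real (batch, num_pixels, encoder_dim) data.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    attn = Attention(*init_args, **init_kwargs)
    encoder_out, decoder_hidden = get_inputs()
    weighted, alpha = attn(encoder_out, decoder_hidden)
    print(weighted.shape, alpha.shape)  # alpha is a softmax over dim=1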
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x6 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(out_ptr0 + x4, tmp8, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x1 = xindex // 4 % 16
x3 = xindex // 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (16 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp0 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp0 * tmp9
tmp11 = tmp8 + tmp10
tl.store(out_ptr0 + x5, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(1024)](buf0,
primals_2, buf1, primals_5, buf2, buf8, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_sum_3[grid(1024)](primals_3, buf6, buf7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
return buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0), reinterpret_tensor(buf2, (256, 4), (4, 1), 0
), buf6, primals_7, buf8
class AttentionNew(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(AttentionNew, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim)
self.decoder_att = nn.Linear(decoder_dim, attention_dim)
self.full_att = nn.Linear(attention_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1):
primals_1 = self.encoder_att.weight
primals_2 = self.encoder_att.bias
primals_4 = self.decoder_att.weight
primals_5 = self.decoder_att.bias
primals_7 = self.full_att.weight
primals_8 = self.full_att.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
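# Editor's usage sketch (assumption: CUDA device 0 is available). AttentionNew
# mirrors the eager Attention API but routes forward() through the compiled
# call(), which allocates CUDA buffers and launches the Triton kernels above.
if __name__ == "__main__" and torch.cuda.is_available():
    attn = AttentionNew(encoder_dim=4, decoder_dim=4, attention_dim=4).cuda()
    enc = torch.rand(4, 4, 4, 4, device='cuda')
    dec = torch.rand(4, 4, 4, 4, device='cuda')
    weighted, alpha = attn(enc, dec)
    print(weighted.shape, alpha.shape)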
| marcoleewow/LaTeX_OCR | Attention | false | 16,017 | ["Apache-2.0"] | 290 | 0980ea719f8d3175a6bbf6af18873dd72d04b8c7 | https://github.com/marcoleewow/LaTeX_OCR/tree/0980ea719f8d3175a6bbf6af18873dd72d04b8c7 |
Project3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/32/c32ppse2vdmak5is2nuwq2vbmvddtxyrdxkeqrxyec7bhptha7aa.py
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# cam_points => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
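    # Descriptive comment (editor addition): materializes P = (K @ T)[:, :3, :]
    # contiguously. Per batch block of 64 values, only the first 48 (the top 3
    # rows of each 4x4 product) are copied, hence xnumel = 4 * 48 = 192.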
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = (xindex // 48)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p3/cp33b3l57w5jlxxjkbyxuiiw6erdw3sxbh2kc6ydjd74u3ubmxdx.py
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
# Source node to ATen node mapping:
# pix_coords_3 => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_16, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {})
triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
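    # Descriptive comment (editor addition): normalizes camera points to
    # sampling coordinates in [-1, 1]. x/y are divided by depth + eps (tmp10),
    # scaled by 1/(W-1) = 1/(H-1) = 1/3 (the 0.333... constant), then mapped
    # via (p - 0.5) * 2. The tl.where chains replay the two in-place channel
    # divisions from the eager forward.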
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + (48*x2)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + (x4), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf1, 192, grid=grid(192), stream=stream0)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
triton_poi_fused_mul_sub_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
return (buf3, reinterpret_tensor(buf2, (4, 1, 4, 4), (48, 16, 4, 1), 32), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords, cam_points[:, 2, :].unsqueeze(1)
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
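# Editor's usage sketch (not part of the original record): running the eager
# layer on the synthetic tensors; K and T here are random 4x4x4x4 stand-ins,
# not calibrated intrinsics/extrinsics.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    proj = Project3D(*init_args, **init_kwargs)
    points, K, T = get_inputs()
    pix_coords, depth = proj(points, K, T)
    print(pix_coords.shape, depth.shape)  # (4, 4, 4, 2) and (4, 1, 4, 4)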
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = xindex // 48
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2
)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(buf2, (4, 1, 4, 4), (48, 16, 4, 1), 32)
class Project3DNew(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3DNew, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg2_1 = input_0
arg0_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
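# Editor's usage sketch (assumptions: CUDA is available, and the wrapper's
# input order is (points, then the two 4x4x4x4 operands) as wired in forward(),
# which need not match the eager (points, K, T) order).
if __name__ == "__main__" and torch.cuda.is_available():
    proj = Project3DNew(batch_size=4, height=4, width=4)
    pix, depth = proj(torch.rand(4, 3, 4, 4, device='cuda'),
                      torch.rand(4, 4, 4, 4, device='cuda'),
                      torch.rand(4, 4, 4, 4, device='cuda'))
    print(pix.shape, depth.shape)  # (4, 4, 4, 2) and (4, 1, 4, 4)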
| mattpoggi/depthstillation | Project3D | false | 16,018 | ["MIT"] | 122 | b74ea4343d8d9f082c82e9f72d9294200aea8bb7 | https://github.com/mattpoggi/depthstillation/tree/b74ea4343d8d9f082c82e9f72d9294200aea8bb7 |
AttentionSet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [temp1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# temp1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p7/cp7mgrng2aoot3kokspvn2sifs3rykgl5mktnpxnmb7yc57vcvab.py
# Topologically Sorted Source Nodes: [temp2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# temp2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4c/c4cno2b7lqeeqoapu3dmwm2jvkluhoyin4rew6ofmh4g3ap2a2bg.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
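    # Descriptive comment (editor addition): concatenates the two (4, 1)
    # attention scores into a (4, 2) tensor. The constant 0.9999000099990001 is
    # 1 / 1.0001, i.e. (score + att_reg) / (att_tem + 0.0001) folded into a
    # single multiply for att_reg = 0.0, att_tem = 1.0.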
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 + tmp6
tmp8 = 0.9999000099990001
tmp9 = tmp7 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 2, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr1 + (x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp6
tmp17 = tmp16 * tmp8
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp12, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp11, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yq/cyqx5kglppxrerviqdwbdwcfae7px5dlsnu7vdce7slir7hp2hzq.py
# Topologically Sorted Source Nodes: [mul, mul_1, center], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# center => add_2
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %view_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
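    # Descriptive comment (editor addition): row-wise two-way softmax over the
    # concatenated scores (max-subtracted exp for stability), followed by the
    # weighted blend of the two center embeddings: out = embeds1 * p0 + embeds2 * p1.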
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (x2), xmask)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp2 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp9 = tmp5 / tmp8
tmp10 = tmp0 * tmp9
tmp12 = tmp7 / tmp8
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [temp2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp3], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp1_2], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_5, primals_6, buf4, 32, grid=grid(32), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm]
extern_kernels.mm(buf4, primals_3, out=buf5)
del primals_3
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [temp2_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp3_1], Original ATen: [aten.mm]
extern_kernels.mm(buf6, primals_4, out=buf7)
buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, buf7, buf8, 8, grid=grid(8), stream=stream0)
del buf3
del buf7
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, center], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_3.run(primals_1, buf8, primals_5, buf9, 16, grid=grid(16), stream=stream0)
return (buf9, primals_1, primals_5, buf2, buf6, buf8, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(buf4, (8, 4), (1, 8), 0), reinterpret_tensor(buf0, (8, 4), (1, 8), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, mode_dims, expand_dims, center_use_offset, att_type,
bn, nat, name='Real'):
super(Attention, self).__init__()
self.center_use_offset = center_use_offset
self.bn = bn
self.nat = nat
if center_use_offset:
self.atten_mats1 = nn.Parameter(torch.FloatTensor(expand_dims *
2, mode_dims))
else:
self.atten_mats1 = nn.Parameter(torch.FloatTensor(expand_dims,
mode_dims))
        nn.init.xavier_uniform_(self.atten_mats1)
self.register_parameter('atten_mats1_%s' % name, self.atten_mats1)
if self.nat >= 2:
self.atten_mats1_1 = nn.Parameter(torch.FloatTensor(mode_dims,
mode_dims))
            nn.init.xavier_uniform_(self.atten_mats1_1)
self.register_parameter('atten_mats1_1_%s' % name, self.
atten_mats1_1)
if self.nat >= 3:
self.atten_mats1_2 = nn.Parameter(torch.FloatTensor(mode_dims,
mode_dims))
            nn.init.xavier_uniform_(self.atten_mats1_2)
self.register_parameter('atten_mats1_2_%s' % name, self.
atten_mats1_2)
if bn != 'no':
self.bn1 = nn.BatchNorm1d(mode_dims)
self.bn1_1 = nn.BatchNorm1d(mode_dims)
self.bn1_2 = nn.BatchNorm1d(mode_dims)
if att_type == 'whole':
self.atten_mats2 = nn.Parameter(torch.FloatTensor(mode_dims, 1))
elif att_type == 'ele':
self.atten_mats2 = nn.Parameter(torch.FloatTensor(mode_dims,
mode_dims))
        nn.init.xavier_uniform_(self.atten_mats2)
self.register_parameter('atten_mats2_%s' % name, self.atten_mats2)
def forward(self, center_embed, offset_embed=None):
if self.center_use_offset:
temp1 = torch.cat([center_embed, offset_embed], dim=1)
else:
temp1 = center_embed
if self.nat >= 1:
if self.bn == 'no':
temp2 = F.relu(temp1.mm(self.atten_mats1))
elif self.bn == 'before':
temp2 = F.relu(self.bn1(temp1.mm(self.atten_mats1)))
elif self.bn == 'after':
temp2 = self.bn1(F.relu(temp1.mm(self.atten_mats1)))
if self.nat >= 2:
if self.bn == 'no':
temp2 = F.relu(temp2.mm(self.atten_mats1_1))
elif self.bn == 'before':
temp2 = F.relu(self.bn1_1(temp2.mm(self.atten_mats1_1)))
elif self.bn == 'after':
temp2 = self.bn1_1(F.relu(temp2.mm(self.atten_mats1_1)))
if self.nat >= 3:
if self.bn == 'no':
temp2 = F.relu(temp2.mm(self.atten_mats1_2))
elif self.bn == 'before':
temp2 = F.relu(self.bn1_2(temp2.mm(self.atten_mats1_2)))
elif self.bn == 'after':
temp2 = self.bn1_2(F.relu(temp2.mm(self.atten_mats1_2)))
temp3 = temp2.mm(self.atten_mats2)
return temp3
class AttentionSet(nn.Module):
def __init__(self, mode_dims, expand_dims, center_use_offset, att_reg=
0.0, att_tem=1.0, att_type='whole', bn='no', nat=1, name='Real'):
super(AttentionSet, self).__init__()
self.center_use_offset = center_use_offset
self.att_reg = att_reg
self.att_type = att_type
self.att_tem = att_tem
self.Attention_module = Attention(mode_dims, expand_dims,
center_use_offset, att_type=att_type, bn=bn, nat=nat)
def forward(self, embeds1, embeds1_o, embeds2, embeds2_o, embeds3=[],
embeds3_o=[]):
temp1 = (self.Attention_module(embeds1, embeds1_o) + self.att_reg) / (
self.att_tem + 0.0001)
temp2 = (self.Attention_module(embeds2, embeds2_o) + self.att_reg) / (
self.att_tem + 0.0001)
if len(embeds3) > 0:
temp3 = (self.Attention_module(embeds3, embeds3_o) + self.att_reg
) / (self.att_tem + 0.0001)
if self.att_type == 'whole':
combined = F.softmax(torch.cat([temp1, temp2, temp3], dim=1
), dim=1)
center = embeds1 * combined[:, 0].view(embeds1.size(0), 1
) + embeds2 * combined[:, 1].view(embeds2.size(0), 1
) + embeds3 * combined[:, 2].view(embeds3.size(0), 1)
elif self.att_type == 'ele':
combined = F.softmax(torch.stack([temp1, temp2, temp3]), dim=0)
center = embeds1 * combined[0] + embeds2 * combined[1
] + embeds3 * combined[2]
elif self.att_type == 'whole':
combined = F.softmax(torch.cat([temp1, temp2], dim=1), dim=1)
center = embeds1 * combined[:, 0].view(embeds1.size(0), 1
) + embeds2 * combined[:, 1].view(embeds2.size(0), 1)
elif self.att_type == 'ele':
combined = F.softmax(torch.stack([temp1, temp2]), dim=0)
center = embeds1 * combined[0] + embeds2 * combined[1]
return center
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'mode_dims': 4, 'expand_dims': 4, 'center_use_offset': 4}]
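# Editor's usage sketch (not part of the original record): exercising the
# default 'whole' attention over two (center, offset) pairs with the synthetic
# shapes above; center_use_offset=4 is truthy, so offsets are concatenated in.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    att_set = AttentionSet(*init_args, **init_kwargs)
    e1, e1_o, e2, e2_o = get_inputs()
    center = att_set(e1, e1_o, e2, e2_o)
    print(center.shape)  # (4, 4): softmax-weighted blend of e1 and e2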
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 0.0
tmp7 = tmp5 + tmp6
tmp8 = 0.9999000099990001
tmp9 = tmp7 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp15 = tl.load(in_ptr1 + x1, tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp15 + tmp6
tmp17 = tmp16 * tmp8
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp12, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp11, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
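# Note on the constants above (an inference from AttentionSetNew's defaults,
# not anything stated in the generated code): with att_reg = 0.0 and
# att_tem = 1.0, (score + att_reg) / (att_tem + 0.0001) constant-folds to
# score * 0.9999000099990001, i.e. tmp6 is att_reg and tmp8 is
# 1 / (att_tem + 0.0001). The kernel also concatenates the two per-branch
# attention scores into a (4, 2) logits tensor.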
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x2, xmask)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp2 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp9 = tmp5 / tmp8
tmp10 = tmp0 * tmp9
tmp12 = tmp7 / tmp8
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
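def _reference_add_mul_softmax(embeds1, logits, embeds2):
    # Hedged eager-mode reference for triton_poi_fused_add_mul_3 (this helper
    # is an illustrative addition, not part of the generated module): a
    # numerically stable row-wise softmax over the two attention logits,
    # then a convex blend of the two center embeddings.
    weights = torch.softmax(logits, dim=1)  # logits: (N, 2)
    return embeds1 * weights[:, 0:1] + embeds2 * weights[:, 1:2]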
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_0[grid(32)](primals_5, primals_6, buf4, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, primals_3, out=buf5)
del primals_3
buf6 = buf5
del buf5
triton_poi_fused_relu_1[grid(16)](buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf6, primals_4, out=buf7)
buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_cat_2[grid(8)](buf3, buf7, buf8, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del buf3
del buf7
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(16)](primals_1, buf8, primals_5,
buf9, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf9, primals_1, primals_5, buf2, buf6, buf8, reinterpret_tensor(
        primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(buf4, (8, 4),
        (1, 8), 0), reinterpret_tensor(buf0, (8, 4), (1, 8), 0)
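# The first returned buffer (buf9) is the blended center embedding; the
# remaining tensors are what Inductor saves for the backward pass (the raw
# inputs, the ReLU activations buf2/buf6, the attention logits buf8, and
# transposed views of the projection weight and concatenated inputs).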
class Attention(nn.Module):
def __init__(self, mode_dims, expand_dims, center_use_offset, att_type,
bn, nat, name='Real'):
super(Attention, self).__init__()
self.center_use_offset = center_use_offset
self.bn = bn
self.nat = nat
        if center_use_offset:
            self.atten_mats1 = nn.Parameter(torch.FloatTensor(expand_dims * 2, mode_dims))
        else:
            self.atten_mats1 = nn.Parameter(torch.FloatTensor(expand_dims, mode_dims))
        nn.init.xavier_uniform_(self.atten_mats1)
        self.register_parameter('atten_mats1_%s' % name, self.atten_mats1)
        if self.nat >= 2:
            self.atten_mats1_1 = nn.Parameter(torch.FloatTensor(mode_dims, mode_dims))
            nn.init.xavier_uniform_(self.atten_mats1_1)
            self.register_parameter('atten_mats1_1_%s' % name, self.atten_mats1_1)
        if self.nat >= 3:
            self.atten_mats1_2 = nn.Parameter(torch.FloatTensor(mode_dims, mode_dims))
            nn.init.xavier_uniform_(self.atten_mats1_2)
            self.register_parameter('atten_mats1_2_%s' % name, self.atten_mats1_2)
        if bn != 'no':
            self.bn1 = nn.BatchNorm1d(mode_dims)
            self.bn1_1 = nn.BatchNorm1d(mode_dims)
            self.bn1_2 = nn.BatchNorm1d(mode_dims)
        if att_type == 'whole':
            self.atten_mats2 = nn.Parameter(torch.FloatTensor(mode_dims, 1))
        elif att_type == 'ele':
            self.atten_mats2 = nn.Parameter(torch.FloatTensor(mode_dims, mode_dims))
        nn.init.xavier_uniform_(self.atten_mats2)
        self.register_parameter('atten_mats2_%s' % name, self.atten_mats2)
def forward(self, center_embed, offset_embed=None):
if self.center_use_offset:
temp1 = torch.cat([center_embed, offset_embed], dim=1)
else:
temp1 = center_embed
if self.nat >= 1:
if self.bn == 'no':
temp2 = F.relu(temp1.mm(self.atten_mats1))
elif self.bn == 'before':
temp2 = F.relu(self.bn1(temp1.mm(self.atten_mats1)))
elif self.bn == 'after':
temp2 = self.bn1(F.relu(temp1.mm(self.atten_mats1)))
if self.nat >= 2:
if self.bn == 'no':
temp2 = F.relu(temp2.mm(self.atten_mats1_1))
elif self.bn == 'before':
temp2 = F.relu(self.bn1_1(temp2.mm(self.atten_mats1_1)))
elif self.bn == 'after':
temp2 = self.bn1_1(F.relu(temp2.mm(self.atten_mats1_1)))
if self.nat >= 3:
if self.bn == 'no':
temp2 = F.relu(temp2.mm(self.atten_mats1_2))
elif self.bn == 'before':
temp2 = F.relu(self.bn1_2(temp2.mm(self.atten_mats1_2)))
elif self.bn == 'after':
temp2 = self.bn1_2(F.relu(temp2.mm(self.atten_mats1_2)))
temp3 = temp2.mm(self.atten_mats2)
return temp3
class AttentionSetNew(nn.Module):
    def __init__(self, mode_dims, expand_dims, center_use_offset, att_reg=0.0,
                 att_tem=1.0, att_type='whole', bn='no', nat=1, name='Real'):
super(AttentionSetNew, self).__init__()
self.center_use_offset = center_use_offset
self.att_reg = att_reg
self.att_type = att_type
self.att_tem = att_tem
self.Attention_module = Attention(mode_dims, expand_dims,
center_use_offset, att_type=att_type, bn=bn, nat=nat)
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.Attention_module.atten_mats1
primals_4 = self.Attention_module.atten_mats2
primals_1 = input_0
primals_2 = input_1
primals_5 = input_2
primals_6 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
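# Hedged usage sketch (shapes follow get_inputs()/get_init_inputs() above;
# illustrative only, not part of the exported module):
#
#   model = AttentionSetNew(mode_dims=4, expand_dims=4, center_use_offset=4).cuda()
#   xs = [torch.rand(4, 4, device='cuda') for _ in range(4)]
#   center = model(*xs)   # (4, 4) blended center embedding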
| marcos0318/query2box | AttentionSet | false | 16,019 | ["MIT"] | 140 | cc8b47e21a5addf17ee5a3c68412b638ef3956f3 | https://github.com/marcos0318/query2box/tree/cc8b47e21a5addf17ee5a3c68412b638ef3956f3 |
TransformerEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
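# The kernel above fuses the query-projection bias add with the attention
# scaling q * head_dim ** -0.5. With d_model=4 and nhead=4 each head has
# head_dim=1, so the scale constant-folds to the literal 1.0 seen above.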
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
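# The two softmax kernels above implement the standard two-pass, numerically
# stable softmax over the last dimension of the (4, 4, 4) attention-score
# tensor. A hedged eager-mode equivalent (illustrative only):
#
#   m = scores.amax(dim=-1, keepdim=True)
#   e = (scores - m).exp()
#   probs = e / e.sum(dim=-1, keepdim=True)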
# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7m/c7my77j7miwq7j5yz26lhwtp4fyb6qiw2vuvksvbnxxhdrtuljuq.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uy/cuyacfovgswdpyhlq2s2chxvljavfbdvz7wnuo2oaa6t6ewmxjgf.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
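# Kernels 4 and 5 above jointly realize "src = norm1(src + attn_out)":
# kernel 4 produces the per-row mean and biased variance (var_mean with
# correction=0), and kernel 5 applies (x - mean) * rsqrt(var + 1e-05) *
# weight + bias. A hedged eager-mode equivalent (illustrative only):
#
#   y = torch.nn.functional.layer_norm(src + attn_out, (4,), weight, bias, eps=1e-05)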
# kernel path: runs/run_shard_0/inductor_cache/t2/ct2ew6bik7urm4vzmwlzemshafen24n6ggyig3fku6sj4dckl336.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add_3, erf, mul_3, mul_4, mul_5
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 0.5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %add_3), kwargs = {})
triton_poi_fused_gelu_6 = async_compile.triton('triton_poi_fused_gelu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, None)
''', device_str='cuda')
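# The kernel above is the exact erf-based GELU,
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# with 1/sqrt(2) = 0.7071067811865476 folded in as a literal.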
# kernel path: runs/run_shard_0/inductor_cache/3m/c3mh4ag5y7d2kfw4id5vjhn3zjt2ucu33pwtmgndlspt4gg5cawj.py
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_2 => add_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5m/c5m2x4kwr66u6jzlkjcacrwhzqxhxsn3hv6ryzwol7bzp7uppnze.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_5, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [1]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pg/cpgskb56mehof5k52uslszbldka4jbq52y6dhbe764xtjdj3lwxc.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_5, add_6, mul_6, mul_7, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [1]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_9), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %primals_12), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %primals_13), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (2048, 4), (4, 1))
assert_size_stride(primals_9, (2048, ), (1, ))
assert_size_stride(primals_10, (4, 2048), (2048, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_2, (4, ), (1, ), 4), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_2, (4, ), (1, ), 8), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_1
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_4
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(primals_5, buf9, buf10, buf11, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 16, grid=grid(16), stream=stream0)
del primals_7
buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf12, reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_9
buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
triton_poi_fused_gelu_6.run(buf13, buf14, 8192, grid=grid(8192), stream=stream0)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf15)
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf16, buf12, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
buf17 = buf11; del buf11 # reuse
buf18 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf16, buf17, buf18, 4, grid=grid(4), stream=stream0)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 16, grid=grid(16), stream=stream0)
del buf17
del buf18
del primals_13
return (buf19, primals_5, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf13, buf14, buf16, primals_10, primals_8, primals_3, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from torch.nn import TransformerEncoderLayer
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.init import xavier_uniform_
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
class TransformerEncoderLayer(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.activation = F.gelu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: 'Tensor', src_mask: 'Optional[Tensor]'=None,
src_key_padding_mask: 'Optional[Tensor]'=None) ->Tensor:
"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
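# Note (added for clarity): this is the post-norm ("post-LN") layout,
# src = norm1(src + self_attn(src)); src = norm2(src + ffn(src)), with a
# GELU feed-forward activation instead of nn.TransformerEncoderLayer's
# default ReLU.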
class MultiheadAttention(nn.Module):
"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\\text{MultiHead}(Q, K, V) = \\text{Concat}(head_1,\\dots,head_h)W^O
\\text{where} head_i = \\text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: 'Optional[torch.Tensor]'
bias_v: 'Optional[torch.Tensor]'
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim,
embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
              If a ByteTensor is provided, the non-zero positions will be ignored while the
              zero positions will be unchanged. If a BoolTensor is provided, the positions with
              the value of ``True`` will be ignored while the positions with the value of
              ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
              S is the source sequence length. attn_mask ensures that position i is allowed to
              attend the unmasked positions. If a ByteTensor is provided, the non-zero positions
              are not allowed to attend while the zero positions will be unchanged. If a
              BoolTensor is provided, positions with ``True`` are not allowed to attend while
              ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
        if not self._qkv_same_embed_dim:
            return F.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training, key_padding_mask=key_padding_mask,
                need_weights=need_weights, attn_mask=attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight,
                k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight)
        else:
            return F.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training, key_padding_mask=key_padding_mask,
                need_weights=need_weights, attn_mask=attn_mask)
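# Hedged shape example for this MultiheadAttention (illustrative only; the
# module expects sequence-first (L, N, E) inputs):
#
#   mha = MultiheadAttention(embed_dim=8, num_heads=2)
#   q = torch.rand(5, 3, 8)      # (L, N, E)
#   out, attn = mha(q, q, q)     # out: (5, 3, 8); attn: (3, 5, 5), head-averaged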
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.init import xavier_uniform_
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
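    # 0.7071067811865476 = 1 / sqrt(2): this kernel evaluates the exact
    # erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))).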
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (2048, 4), (4, 1))
assert_size_stride(primals_9, (2048,), (1,))
assert_size_stride(primals_10, (4, 2048), (2048, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_5, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 4),
primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 8),
primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_1
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_4
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(4)](primals_5, buf9,
buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_5, buf9,
buf10, buf11, primals_6, primals_7, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.addmm(primals_9, buf12, reinterpret_tensor(primals_8,
(4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_9
buf14 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
triton_poi_fused_gelu_6[grid(8192)](buf13, buf14, 8192, XBLOCK=128,
num_warps=4, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (2048, 4),
(1, 2048), 0), out=buf15)
buf16 = buf15
del buf15
triton_poi_fused_add_7[grid(16)](buf16, buf12, primals_11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
buf17 = buf11
del buf11
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_8[grid(4)](buf16, buf17, buf18,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(16)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return (buf19, primals_5, primals_6, primals_12, buf6,
reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf13,
buf14, buf16, primals_10, primals_8, primals_3, reinterpret_tensor(
buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4),
(1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
    it in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
super(TransformerEncoderLayerNew, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.activation = F.gelu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayerNew, self).__setstate__(state)
def forward(self, input_0):
primals_1 = self.self_attn.in_proj_weight
primals_2 = self.self_attn.in_proj_bias
primals_3 = self.self_attn.out_proj.weight
primals_4 = self.self_attn.out_proj.bias
primals_8 = self.linear1.weight
primals_9 = self.linear1.bias
primals_10 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.norm1.weight
primals_11 = self.norm1.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
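# Hypothetical smoke test (not part of the original repo): with a small
# d_model=4, nhead=4 configuration, the fused call() path takes a single
# (seq_len, d_model) tensor and returns a tensor of the same shape.
#
#   layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
#   out = layer(torch.rand(4, 4, device='cuda'))
#   assert out.shape == (4, 4)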
class MultiheadAttention(nn.Module):
"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\\text{MultiHead}(Q, K, V) = \\text{Concat}(head_1,\\dots,head_h)W^O
\\text{where} head_i = \\text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: 'Optional[torch.Tensor]'
bias_v: 'Optional[torch.Tensor]'
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim ==
embed_dim)
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim,
embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
            attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast
                across all batches, while a 3D mask allows a different mask to be specified for each batch entry.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
                If a ByteTensor is provided, the non-zero positions will be ignored while the
                zero positions will be unchanged. If a BoolTensor is provided, positions with the
                value of ``True`` will be ignored while positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
                S is the source sequence length. attn_mask ensures that position i is allowed to attend to the unmasked
                positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
                while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
                are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, self.in_proj_weight, self.
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=True, q_proj_weight=self.
q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, self.in_proj_weight, self.
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask)
| markovka17/efficient-dl-systems | TransformerEncoderLayer | false | 16,020 | [
"MIT"
] | 85 | 310d1471e72ba70a0892cf5c9653ade17f091be5 | https://github.com/markovka17/efficient-dl-systems/tree/310d1471e72ba70a0892cf5c9653ade17f091be5 |
PixelNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uq/cuqlfvqfeznifrvz7odrw3ezlfid2vgqb7wezw6nc6yrrg5447bi.py
# Topologically Sorted Source Nodes: [norm, sqrt, norm_1, add, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.sqrt, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# norm => pow_1, pow_2, sum_1
# norm_1 => div
# sqrt => full_default
# truediv_1 => div_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %full_default), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-06), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.5
tmp14 = tmp12 * tmp13
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, sqrt, norm_1, add, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.sqrt, aten.div, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def pixel_norm(x, eps=1e-06):
"""Pixel Normalization.
This normalization is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid division by zero.
Defaults to 1e-6.
Returns:
torch.Tensor: Normalized tensor.
"""
if torch.__version__ >= '1.7.0':
norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
else:
norm = torch.norm(x, p=2, dim=1, keepdim=True)
norm = norm / torch.sqrt(torch.tensor(x.shape[1]))
return x / (norm + eps)
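# A minimal sanity sketch (assumption, not from the original repo): dividing
# the L2 norm by sqrt(num_channels) turns it into a per-pixel RMS over the
# channel axis, so for eps -> 0 pixel_norm matches an explicit RMS division.
#
#   x = torch.rand(4, 4, 4, 4)
#   rms = x.pow(2).mean(dim=1, keepdim=True).sqrt()
#   assert torch.allclose(pixel_norm(x, eps=0.0), x / rms, atol=1e-6)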
class PixelNorm(nn.Module):
"""Pixel Normalization.
This module is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
eps (float, optional): Epsilon value. Defaults to 1e-6.
"""
_abbr_ = 'pn'
def __init__(self, in_channels=None, eps=1e-06):
super(PixelNorm, self).__init__()
self.eps = eps
def forward(self, x):
return pixel_norm(x, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.5
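    # 0.5 = 1 / sqrt(C) for C = 4 channels, folded in at compile time: it
    # rescales the per-pixel L2 norm into the RMS that pixel_norm divides by.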
tmp14 = tmp12 * tmp13
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_sqrt_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def pixel_norm(x, eps=1e-06):
"""Pixel Normalization.
This normalization is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid division by zero.
Defaults to 1e-6.
Returns:
torch.Tensor: Normalized tensor.
"""
if torch.__version__ >= '1.7.0':
norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
else:
norm = torch.norm(x, p=2, dim=1, keepdim=True)
norm = norm / torch.sqrt(torch.tensor(x.shape[1]))
return x / (norm + eps)
class PixelNormNew(nn.Module):
"""Pixel Normalization.
This module is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
eps (float, optional): Epsilon value. Defaults to 1e-6.
"""
_abbr_ = 'pn'
def __init__(self, in_channels=None, eps=1e-06):
super(PixelNormNew, self).__init__()
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| matrixgame2018/mmediting | PixelNorm | false | 16,021 | [
"Apache-2.0"
] | 1,884 | 5170a64a586cc876a5cb459fbfa0cf9b55bfa5fd | https://github.com/matrixgame2018/mmediting/tree/5170a64a586cc876a5cb459fbfa0cf9b55bfa5fd |
UnStackDelta | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
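    # the input offset swaps the x1 (stride 16) and x2 (stride 4) factors
    # relative to the contiguous output, materializing x.transpose(1, 2).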
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 4, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 4, 16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class UnStackDelta(nn.Module):
"""Reverse of StackDelta"""
def __init__(self):
super().__init__()
def forward(self, x: 'torch.Tensor'):
assert x.dim() == 4
if x.requires_grad:
out = x.transpose(1, 2).contiguous()
else:
out = x.transpose_(1, 2).contiguous()
out = out.view(out.size(0), out.size(1), out.size(2) * out.size(3))
return out
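# A small shape sketch (hypothetical, assuming x is (N, n_deltas, T, F)):
# the transpose + view folds the delta and feature axes into one.
#
#   x = torch.rand(4, 4, 4, 4)                     # (N, n_deltas, T, F)
#   assert UnStackDelta()(x).shape == (4, 4, 16)   # (N, T, n_deltas * F)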
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 4, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0),
class UnStackDeltaNew(nn.Module):
"""Reverse of StackDelta"""
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| maxwellzh/CAT | UnStackDelta | false | 16,022 | [
"Apache-2.0"
] | 237 | b1a9c3f95e84d968593a05bf8b176b5f77b8055e | https://github.com/maxwellzh/CAT/tree/b1a9c3f95e84d968593a05bf8b176b5f77b8055e |
HuberLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xc/cxcnht2dgml7hk6gpnwlpediyags6u44ka2nvp62so2se7o6mxnh.py
# Topologically Sorted Source Nodes: [sub, errors, mask, pow_1, mul, l2_errors, mul_1, invert, l1_errors, mul_2, combined_errors, mean, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.pow, aten.mul, aten.div, aten.bitwise_not, aten.add, aten.mean, aten.sum]
# Source node to ATen node mapping:
# combined_errors => add
# errors => abs_1
# invert => bitwise_not
# l1_errors => sub_1
# l2_errors => div
# mask => lt
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 0.3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0.3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%lt, %div), kwargs = {})
# %bitwise_not : [num_users=1] = call_function[target=torch.ops.aten.bitwise_not.default](args = (%lt,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.15), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bitwise_not, %sub_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [0]), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {})
triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp19 = tl.load(in_ptr0 + (64 + r0), None)
tmp20 = tl.load(in_ptr1 + (64 + r0), None)
tmp35 = tl.load(in_ptr0 + (128 + r0), None)
tmp36 = tl.load(in_ptr1 + (128 + r0), None)
tmp51 = tl.load(in_ptr0 + (192 + r0), None)
tmp52 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 0.3
tmp5 = tmp3 < tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp3
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 3.3333333333333335
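    # 3.3333333333333335 = 1 / beta with beta = 0.3: combined with the 0.5
    # factor above, this evaluates the quadratic branch 0.5 * err**2 / beta.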
tmp11 = tmp9 * tmp10
tmp12 = tmp6 * tmp11
tmp13 = tmp5 == 0
tmp14 = tmp13.to(tl.float32)
tmp15 = 0.15
tmp16 = tmp3 - tmp15
tmp17 = tmp14 * tmp16
tmp18 = tmp12 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tl_math.abs(tmp21)
tmp23 = tmp22 < tmp4
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp22
tmp26 = tmp25 * tmp8
tmp27 = tmp26 * tmp10
tmp28 = tmp24 * tmp27
tmp29 = tmp23 == 0
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp22 - tmp15
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp34 = tmp18 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tl_math.abs(tmp37)
tmp39 = tmp38 < tmp4
tmp40 = tmp39.to(tl.float32)
tmp41 = tmp38 * tmp38
tmp42 = tmp41 * tmp8
tmp43 = tmp42 * tmp10
tmp44 = tmp40 * tmp43
tmp45 = tmp39 == 0
tmp46 = tmp45.to(tl.float32)
tmp47 = tmp38 - tmp15
tmp48 = tmp46 * tmp47
tmp49 = tmp44 + tmp48
tmp50 = tmp34 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tl_math.abs(tmp53)
tmp55 = tmp54 < tmp4
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp54 * tmp54
tmp58 = tmp57 * tmp8
tmp59 = tmp58 * tmp10
tmp60 = tmp56 * tmp59
tmp61 = tmp55 == 0
tmp62 = tmp61.to(tl.float32)
tmp63 = tmp54 - tmp15
tmp64 = tmp62 * tmp63
tmp65 = tmp60 + tmp64
tmp66 = tmp50 + tmp65
tmp67 = 4.0
tmp68 = tmp66 / tmp67
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp71 = tl.sum(tmp69, 1)[:, None]
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp71, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, errors, mask, pow_1, mul, l2_errors, mul_1, invert, l1_errors, mul_2, combined_errors, mean, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.pow, aten.mul, aten.div, aten.bitwise_not, aten.add, aten.mean, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0.run(arg0_1, arg1_1, buf1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class HuberLoss(torch.nn.Module):
def __init__(self, beta=0.3):
self.beta = beta
super(HuberLoss, self).__init__()
def forward(self, suggested, target):
errors = torch.abs(suggested - target)
mask = errors < self.beta
l2_errors = 0.5 * errors ** 2 / self.beta
l1_errors = errors - 0.5 * self.beta
combined_errors = mask * l2_errors + ~mask * l1_errors
return combined_errors.mean(dim=0).sum()
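# Hypothetical cross-check (assumption: PyTorch >= 1.7, where smooth_l1_loss
# gained a beta argument): the piecewise expression above is exactly the
# Huber / smooth-L1 penalty, up to the mean-over-batch and sum reduction.
#
#   import torch.nn.functional as F
#   s, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#   ref = F.smooth_l1_loss(s, t, reduction='none', beta=0.3).mean(dim=0).sum()
#   assert torch.allclose(HuberLoss(beta=0.3)(s, t), ref, atol=1e-6)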
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0(in_ptr0,
in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp19 = tl.load(in_ptr0 + (64 + r0), None)
tmp20 = tl.load(in_ptr1 + (64 + r0), None)
tmp35 = tl.load(in_ptr0 + (128 + r0), None)
tmp36 = tl.load(in_ptr1 + (128 + r0), None)
tmp51 = tl.load(in_ptr0 + (192 + r0), None)
tmp52 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 0.3
tmp5 = tmp3 < tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp3
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 3.3333333333333335
tmp11 = tmp9 * tmp10
tmp12 = tmp6 * tmp11
tmp13 = tmp5 == 0
tmp14 = tmp13.to(tl.float32)
tmp15 = 0.15
tmp16 = tmp3 - tmp15
tmp17 = tmp14 * tmp16
tmp18 = tmp12 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tl_math.abs(tmp21)
tmp23 = tmp22 < tmp4
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp22
tmp26 = tmp25 * tmp8
tmp27 = tmp26 * tmp10
tmp28 = tmp24 * tmp27
tmp29 = tmp23 == 0
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp22 - tmp15
tmp32 = tmp30 * tmp31
tmp33 = tmp28 + tmp32
tmp34 = tmp18 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tl_math.abs(tmp37)
tmp39 = tmp38 < tmp4
tmp40 = tmp39.to(tl.float32)
tmp41 = tmp38 * tmp38
tmp42 = tmp41 * tmp8
tmp43 = tmp42 * tmp10
tmp44 = tmp40 * tmp43
tmp45 = tmp39 == 0
tmp46 = tmp45.to(tl.float32)
tmp47 = tmp38 - tmp15
tmp48 = tmp46 * tmp47
tmp49 = tmp44 + tmp48
tmp50 = tmp34 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tl_math.abs(tmp53)
tmp55 = tmp54 < tmp4
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp54 * tmp54
tmp58 = tmp57 * tmp8
tmp59 = tmp58 * tmp10
tmp60 = tmp56 * tmp59
tmp61 = tmp55 == 0
tmp62 = tmp61.to(tl.float32)
tmp63 = tmp54 - tmp15
tmp64 = tmp62 * tmp63
tmp65 = tmp60 + tmp64
tmp66 = tmp50 + tmp65
tmp67 = 4.0
tmp68 = tmp66 / tmp67
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp71 = tl.sum(tmp69, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp71, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_bitwise_not_div_lt_mean_mul_pow_sub_sum_0[grid
(1)](arg0_1, arg1_1, buf1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf1,
class HuberLossNew(torch.nn.Module):
def __init__(self, beta=0.3):
self.beta = beta
super(HuberLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| martius-lab/CombOptNet | HuberLoss | false | 16,023 | [
"MIT"
] | 46 | d563d31a95dce35a365d50b81f932c27531ae09b | https://github.com/martius-lab/CombOptNet/tree/d563d31a95dce35a365d50b81f932c27531ae09b |
WeldonPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lb/clb7usy7qfmld2idfcmg2r22i3nfxc6io5mv3kdkm6sxqnukrctc.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => sort
# Graph fragment:
# %sort : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%view, 2, True), kwargs = {})
triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True)
tmp7 = tmp6.to(tl.int64)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp5, xmask)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y7/cy7svbrvgizoup47rqhzlbrrwnvehommsnpvckyc5k6loax47jcb.py
# Topologically Sorted Source Nodes: [sum_1, div, sum_2, yMin, add], Original ATen: [aten.sum, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# div => div
# sum_1 => sum_1
# sum_2 => sum_2
# yMin => div_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_6, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_12, [2], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
triton_poi_fused_add_div_sum_1 = async_compile.triton('triton_poi_fused_add_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.int64)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_sort_0.run(arg0_1, buf0, buf3, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, div, sum_2, yMin, add], Original ATen: [aten.sum, aten.div, aten.add]
triton_poi_fused_add_div_sum_1.run(buf0, buf2, 16, grid=grid(16), stream=stream0)
del buf0
return (reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 15), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeldonPooling(nn.Module):
def __init__(self, nMax=1, nMin=None):
super(WeldonPooling, self).__init__()
self.nMax = nMax
if nMin is None:
self.nMin = nMax
else:
self.nMin = nMin
self.input = torch.Tensor()
self.output = torch.Tensor()
self.indicesMax = torch.Tensor()
self.indicesMin = torch.Tensor()
def forward(self, input):
self.batchSize = 0
self.numChannels = 0
self.h = 0
self.w = 0
if input.dim() == 4:
self.batchSize = input.size(0)
self.numChannels = input.size(1)
self.h = input.size(2)
self.w = input.size(3)
elif input.dim() == 3:
self.batchSize = 1
self.numChannels = input.size(0)
self.h = input.size(1)
self.w = input.size(2)
        else:
            pass  # inputs must be 3D or 4D; other ranks are not handled here
self.input = input
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
x = input.view(self.batchSize, self.numChannels, self.h * self.w)
scoreSorted, indices = torch.sort(x, x.dim() - 1, True)
self.indicesMax = indices[:, :, 0:nMax]
self.output = torch.sum(scoreSorted[:, :, 0:nMax], dim=2, keepdim=True)
self.output = self.output.div(nMax)
if nMin > 0:
            self.indicesMin = indices[:, :, self.h * self.w - nMin:
                self.h * self.w]
            yMin = torch.sum(scoreSorted[:, :, self.h * self.w - nMin:
                self.h * self.w], 2, keepdim=True).div(nMin)
self.output = torch.add(self.output, yMin)
if input.dim() == 4:
self.output = self.output.view(self.batchSize, self.numChannels,
1, 1)
elif input.dim() == 3:
self.output = self.output.view(self.numChannels, 1, 1)
return self.output
def backward(self, grad_output, _indices_grad=None):
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
yMax = grad_output.clone().view(self.batchSize, self.numChannels, 1
).expand(self.batchSize, self.numChannels, nMax)
z = torch.zeros(self.batchSize, self.numChannels, self.h * self.w
).type_as(self.input)
z = z.scatter_(2, self.indicesMax, yMax).div(nMax)
if nMin > 0:
yMin = grad_output.clone().view(self.batchSize, self.numChannels, 1
).div(nMin).expand(self.batchSize, self.numChannels, nMin)
            self.gradInput = z.scatter_(2, self.indicesMin, yMin).view(
                self.batchSize, self.numChannels, self.h, self.w)
else:
            self.gradInput = z.view(self.batchSize, self.numChannels,
                self.h, self.w)
if self.input.dim() == 3:
self.gradInput = self.gradInput.view(self.numChannels, self.h,
self.w)
return self.gradInput
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
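# Hedged usage sketch (illustrative only): exercising the eager module on the
# shapes produced by get_inputs() above.
def _example_weldon_usage():
    pool = WeldonPooling(nMax=1)
    y = pool(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 1, 1)
    return y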
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
        stable=False, descending=True)
tmp7 = tmp6.to(tl.int64)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp5, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp7, xmask)
@triton.jit
def triton_poi_fused_add_div_sum_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (15 + 16 * x0), xmask,
        eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.int64)
get_raw_stream(0)
triton_per_fused_sort_0[grid(16)](arg0_1, buf0, buf3, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_add_div_sum_1[grid(16)](buf0, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0
), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 15
), reinterpret_tensor(buf3, (4, 4, 1), (64, 16, 1), 0)
class WeldonPoolingNew(nn.Module):
def __init__(self, nMax=1, nMin=None):
super(WeldonPoolingNew, self).__init__()
self.nMax = nMax
if nMin is None:
self.nMin = nMax
else:
self.nMin = nMin
self.input = torch.Tensor()
self.output = torch.Tensor()
self.indicesMax = torch.Tensor()
self.indicesMin = torch.Tensor()
def backward(self, grad_output, _indices_grad=None):
nMax = self.nMax
if nMax <= 0:
nMax = 0
elif nMax < 1:
nMax = torch.clamp(torch.floor(nMax * self.h * self.w), min=1)
nMin = self.nMin
if nMin <= 0:
nMin = 0
elif nMin < 1:
nMin = torch.clamp(torch.floor(nMin * self.h * self.w), min=1)
yMax = grad_output.clone().view(self.batchSize, self.numChannels, 1
).expand(self.batchSize, self.numChannels, nMax)
z = torch.zeros(self.batchSize, self.numChannels, self.h * self.w
).type_as(self.input)
z = z.scatter_(2, self.indicesMax, yMax).div(nMax)
if nMin > 0:
yMin = grad_output.clone().view(self.batchSize, self.numChannels, 1
).div(nMin).expand(self.batchSize, self.numChannels, nMin)
            self.gradInput = z.scatter_(2, self.indicesMin, yMin).view(
                self.batchSize, self.numChannels, self.h, self.w)
else:
            self.gradInput = z.view(self.batchSize, self.numChannels,
                self.h, self.w)
if self.input.dim() == 3:
self.gradInput = self.gradInput.view(self.numChannels, self.h,
self.w)
return self.gradInput
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| maxgreat/dsve-loc | WeldonPooling | false | 16,024 | [
"BSD-3-Clause-Clear"
] | 56 | dd6807d02c0d5fd3e215be8e5c7a88e73102e561 | https://github.com/maxgreat/dsve-loc/tree/dd6807d02c0d5fd3e215be8e5c7a88e73102e561 |
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dw/cdwyx742hy7mnf755bi5wotiz44x7xxsxrkqosaq6ntl5d5ccmx7.py
# Topologically Sorted Source Nodes: [diag_s, add, cost_s, cost_s_1, sum_1, diag_im, add_1, cost_im, cost_im_1, sum_2, add_2], Original ATen: [aten.diag_embed, aten.add, aten.clamp, aten.sub, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# cost_im => clamp_min_1
# cost_im_1 => sub_3
# cost_s => clamp_min
# cost_s_1 => sub_2
# diag_im => eq_1, full_default_1, iota_2, where_1
# diag_s => eq, full_default, iota, where
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %mm), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute_1, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %where), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_2, %unsqueeze_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, %mm), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %permute_2, %full_default_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %where_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_clamp_diag_embed_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_diag_embed_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_diag_embed_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_diag_embed_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (r2), None)
tmp17 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last')
tmp1 = 0.2
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = r0
tmp8 = r1
tmp9 = tmp7 == tmp8
tmp10 = tmp2 + tmp0
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tl.where(tmp9, tmp11, tmp5)
tmp13 = tmp6 - tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tmp1 - tmp17
tmp19 = tmp18 + tmp3
tmp20 = triton_helpers.maximum(tmp19, tmp5)
tmp21 = tmp20 - tmp12
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp16 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None)
''', device_str='cuda')
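# Eager-mode sketch of the fused reduction above. On the diagonal the hinge
# clamp((margin - d_i) + S_ii, 0) is always exactly `margin`, so the kernel
# subtracts a constant there instead of materializing diag_s/diag_im.
# `ref_contrastive` is an illustrative name, not part of the generated code.
def ref_contrastive(imgs, caps, margin=0.2):
    scores = imgs @ caps.t()
    d = scores.diag()
    cost_s = ((margin - d).expand_as(scores) + scores).clamp(min=0)
    cost_im = ((margin - d.view(-1, 1)).expand_as(scores) + scores).clamp(min=0)
    mask = torch.eye(scores.size(0), device=scores.device) * margin
    return (cost_s - mask).sum() + (cost_im - mask).sum()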
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [diag_s, add, cost_s, cost_s_1, sum_1, diag_im, add_1, cost_im, cost_im_1, sum_2, add_2], Original ATen: [aten.diag_embed, aten.add, aten.clamp, aten.sub, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_diag_embed_sub_sum_0.run(buf3, buf0, 1, 16, grid=grid(1), stream=stream0)
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ContrastiveLoss(nn.Module):
def __init__(self, margin=0.2):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
cost_s = torch.clamp((self.margin - diag).expand_as(scores) +
scores, min=0)
cost_im = torch.clamp((self.margin - diag.view(-1, 1)).expand_as(
scores) + scores, min=0)
diag_s = torch.diag(cost_s.diag())
diag_im = torch.diag(cost_im.diag())
cost_s = cost_s - diag_s
cost_im = cost_im - diag_im
return cost_s.sum() + cost_im.sum()
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
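# Hedged usage sketch (illustrative only): with perfectly aligned, L2-normalized
# pairs the diagonal similarities are 1, so the hinge terms are typically small.
def _example_contrastive_usage():
    loss_fn = ContrastiveLoss(margin=0.2)
    imgs = torch.nn.functional.normalize(torch.rand(4, 4), dim=1)
    caps = imgs.clone()
    return loss_fn(imgs, caps)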
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_clamp_diag_embed_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + r2, None)
tmp17 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp1 = 0.2
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = r0
tmp8 = r1
tmp9 = tmp7 == tmp8
tmp10 = tmp2 + tmp0
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tl.where(tmp9, tmp11, tmp5)
tmp13 = tmp6 - tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tmp1 - tmp17
tmp19 = tmp18 + tmp3
tmp20 = triton_helpers.maximum(tmp19, tmp5)
tmp21 = tmp20 - tmp12
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp16 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_clamp_diag_embed_sub_sum_0[grid(1)](buf3, buf0,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf3,
class ContrastiveLossNew(nn.Module):
def __init__(self, margin=0.2):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| maxgreat/dsve-loc | ContrastiveLoss | false | 16,025 | [
"BSD-3-Clause-Clear"
] | 56 | dd6807d02c0d5fd3e215be8e5c7a88e73102e561 | https://github.com/maxgreat/dsve-loc/tree/dd6807d02c0d5fd3e215be8e5c7a88e73102e561 |
SoftBinaryCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3i/c3iem2kgj55wummgvsk5u3cfh4cm3zigrwqxrhjgpljindydpofz.py
# Topologically Sorted Source Nodes: [l, logits], Original ATen: [aten.binary_cross_entropy_with_logits, aten.div]
# Source node to ATen node mapping:
# l => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# logits => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %div), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %div), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%div,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_div_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp1
tmp5 = tmp2 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp6, tmp4)
tmp8 = tl_math.abs(tmp4)
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 - tmp11
tmp13 = tmp5 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
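# The kernel above fuses pred / tau with the numerically stable
# BCE-with-logits identity
#     loss(x, y) = (1 - y) * x + log(1 + exp(-x))
#                = (1 - y) * x - (min(0, x) - log1p(exp(-|x|)))
# which is exactly tmp13 before the mean. A hedged eager equivalent
# (`ref_soft_bce` is an illustrative name, not part of the generated code):
def ref_soft_bce(pred, true, tau=1.0):
    x = pred / tau
    return ((1 - true) * x - (torch.minimum(torch.zeros_like(x), x)
            - torch.log1p(torch.exp(-x.abs())))).mean()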
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [l, logits], Original ATen: [aten.binary_cross_entropy_with_logits, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SoftBinaryCrossEntropyLoss(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.bce_logit = torch.nn.BCEWithLogitsLoss()
def forward(self, pred, true):
logits = pred / self.tau
l = self.bce_logit(logits, true)
return l
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
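# Hedged usage sketch (illustrative only): the tau-scaled loss is identical to
# applying BCEWithLogitsLoss to pred / tau directly.
def _example_soft_bce_usage():
    loss_fn = SoftBinaryCrossEntropyLoss(tau=2.0)
    pred, true = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ref = torch.nn.BCEWithLogitsLoss()(pred / 2.0, true)
    assert torch.allclose(loss_fn(pred, true), ref)
    return ref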
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp1
tmp5 = tmp2 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp6, tmp4)
tmp8 = tl_math.abs(tmp4)
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 - tmp11
tmp13 = tmp5 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SoftBinaryCrossEntropyLossNew(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.bce_logit = torch.nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
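# Hedged sanity-check sketch (illustrative only; needs a CUDA build with the
# triton kernel above compiled): for tau == 1.0 the compiled wrapper should
# agree with an eager BCEWithLogitsLoss on the same inputs.
def _example_compiled_vs_eager():
    if torch.cuda.is_available():
        pred = torch.rand(4, 4, 4, 4, device='cuda')
        true = torch.rand(4, 4, 4, 4, device='cuda')
        out = SoftBinaryCrossEntropyLossNew()(pred, true)
        ref = torch.nn.BCEWithLogitsLoss()(pred, true)
        assert torch.allclose(out, ref)
        return out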
| mfredriksz/semanticGAN_code | SoftBinaryCrossEntropyLoss | false | 16,026 | [
"BSD-2-Clause",
"MIT"
] | 107 | c6e7b490086afd8a7593e2892452295555910494 | https://github.com/mfredriksz/semanticGAN_code/tree/c6e7b490086afd8a7593e2892452295555910494 |
FeatureCorrelation | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sy/csyhswlvyuo2qzwowqiku2qw4gif3we4ukgp4arsneopazxj4fpo.py
# Topologically Sorted Source Nodes: [relu, pow_1, sum_1, correlation_tensor_1], Original ATen: [aten.relu, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# correlation_tensor_1 => div
# pow_1 => pow_1
# relu => relu
# sum_1 => sum_1
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_3,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand), kwargs = {})
triton_per_fused_div_pow_relu_sum_1 = async_compile.triton('triton_per_fused_div_pow_relu_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_relu_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_pow_relu_sum_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp2 / tmp10
tl.store(out_ptr1 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
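# Eager sketch of the kernel above: per output position it applies ReLU and
# divides by the L2 norm over the 16 correlation channels, i.e. featureL2Norm
# with the epsilon added before the square root. `ref_norm_relu` is an
# illustrative name, not part of the generated code.
def ref_norm_relu(corr, eps=1e-06):
    r = corr.relu()
    return r / torch.sqrt(r.pow(2).sum(dim=1, keepdim=True) + eps)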
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [feature_mul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1)
del arg1_1
del buf0
buf3 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [relu, pow_1, sum_1, correlation_tensor_1], Original ATen: [aten.relu, aten.pow, aten.sum, aten.div]
triton_per_fused_div_pow_relu_sum_1.run(buf1, buf3, 64, 16, grid=grid(64), stream=stream0)
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn
def featureL2Norm(feature):
epsilon = 1e-06
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5
).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class FeatureCorrelation(torch.nn.Module):
def __init__(self, shape='3D', normalization=True):
super(FeatureCorrelation, self).__init__()
self.normalization = normalization
self.shape = shape
self.ReLU = nn.ReLU()
def forward(self, feature_A, feature_B):
if self.shape == '3D':
b, c, h, w = feature_A.size()
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w
)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2)
feature_mul = torch.bmm(feature_B, feature_A)
correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(
2, 3).transpose(1, 2)
elif self.shape == '4D':
b, c, hA, wA = feature_A.size()
b, c, hB, wB = feature_B.size()
feature_A = feature_A.view(b, c, hA * wA).transpose(1, 2)
feature_B = feature_B.view(b, c, hB * wB)
feature_mul = torch.bmm(feature_A, feature_B)
correlation_tensor = feature_mul.view(b, hA, wA, hB, wB).unsqueeze(
1)
if self.normalization:
correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))
return correlation_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
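# Hedged usage sketch (illustrative only): the '3D' path maps two (b, c, h, w)
# feature maps to a (b, h*w, h, w) correlation volume.
def _example_feature_correlation_usage():
    corr = FeatureCorrelation(shape='3D')
    out = corr(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 16, 4, 4)
    return out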
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_div_pow_relu_sum_1(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp2 / tmp10
tl.store(out_ptr1 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1,
16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0),
out=buf1)
del arg1_1
del buf0
        buf3 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16),
            torch.float32)
triton_per_fused_div_pow_relu_sum_1[grid(64)](buf1, buf3, 64, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del buf1
return buf3,
def featureL2Norm(feature):
epsilon = 1e-06
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5
).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class FeatureCorrelationNew(torch.nn.Module):
def __init__(self, shape='3D', normalization=True):
super(FeatureCorrelationNew, self).__init__()
self.normalization = normalization
self.shape = shape
self.ReLU = nn.ReLU()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mcimpoi/ncnet | FeatureCorrelation | false | 16,027 | [
"MIT"
] | 159 | d801df77154bce9e5653090273aacb0e588fa4ea | https://github.com/mcimpoi/ncnet/tree/d801df77154bce9e5653090273aacb0e588fa4ea |
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fj/cfjqcl5zyffb5ly6gkhpzdblek24xftrdhp34wnn56hi434h2szb.py
# Topologically Sorted Source Nodes: [alphas], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alphas => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
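# The kernel above computes a row-wise softmax over the 2 search-space columns
# by reloading both row entries per element: the row max for stability, then
# exp and the normalizing sum. Eager sketch (`ref_row_softmax` is an
# illustrative name, not part of the generated code):
def ref_row_softmax(a):
    m = a.max(dim=-1, keepdim=True).values
    e = (a - m).exp()
    return e / e.sum(dim=-1, keepdim=True)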
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (6, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((6, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [alphas], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(primals_1, buf0, 12, grid=grid(12), stream=stream0)
del primals_1
return (buf0, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 2), (2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
from copy import deepcopy
import torch.nn.parallel
import torch.optim
class Policy(nn.Module):
def __init__(self, max_nodes, search_space):
super(Policy, self).__init__()
self.max_nodes = max_nodes
self.search_space = deepcopy(search_space)
self.edge2index = {}
for i in range(1, max_nodes):
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
self.edge2index[node_str] = len(self.edge2index)
        self.arch_parameters = nn.Parameter(0.001 * torch.randn(
            len(self.edge2index), len(search_space)))
def generate_arch(self, actions):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
op_name = self.search_space[actions[self.edge2index[node_str]]]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
        # `CellStructure` is assumed to be importable from the surrounding
        # repo's NAS utilities; it is not defined in this snippet.
        return CellStructure(genotypes)
def genotype(self):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self.arch_parameters[self.edge2index[node_str]]
op_name = self.search_space[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def forward(self):
alphas = nn.functional.softmax(self.arch_parameters, dim=-1)
return alphas
def get_inputs():
return []
def get_init_inputs():
return [[], {'max_nodes': 4, 'search_space': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils
from copy import deepcopy
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (6, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((6, 2), (2, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(12)](primals_1, buf0, 12,
            XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, buf0
class PolicyNew(nn.Module):
def __init__(self, max_nodes, search_space):
super(PolicyNew, self).__init__()
self.max_nodes = max_nodes
self.search_space = deepcopy(search_space)
self.edge2index = {}
for i in range(1, max_nodes):
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
self.edge2index[node_str] = len(self.edge2index)
        self.arch_parameters = nn.Parameter(0.001 * torch.randn(
            len(self.edge2index), len(search_space)))
def generate_arch(self, actions):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
op_name = self.search_space[actions[self.edge2index[node_str]]]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def genotype(self):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self.arch_parameters[self.edge2index[node_str]]
op_name = self.search_space[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def forward(self):
primals_1 = self.arch_parameters
output = call([primals_1])
return output[0]
| megvii-model/AngleNAS | Policy | false | 16,028 | [
"MIT"
] | 53 | c4cb189f04450db43e2014e178aa8a20ef5b316e | https://github.com/megvii-model/AngleNAS/tree/c4cb189f04450db43e2014e178aa8a20ef5b316e |
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/id/cidyrm7dvcqnvrurbocvast6bezwtxnk7no2mcfz3ehsgyah5lnx.py
# Topologically Sorted Source Nodes: [x_2, residual_output, activated_output], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# activated_output => relu_1
# residual_output => add
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp8, xmask)
''', device_str='cuda')
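# Added note (editor): out_ptr1/buf5 holds the boolean mask relu_out <= 0 that
# autograd's threshold_backward later consumes to zero gradients through the
# final ReLU; fusing the mask into the forward kernel avoids recomputing it
# during the backward pass.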
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2, residual_output, activated_output], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf3, primals_5, primals_6, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
del primals_6
return (buf4, buf3, primals_1, primals_3, primals_4, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import Tuple
def conv3x3(in_channels: 'int', out_channels: 'int', stride: 'int'=1,
        padding: 'int'=1) -> nn.Module:
    conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
        stride=stride, padding=padding, bias=True)
nn.init.xavier_normal_(conv.weight)
if conv.bias is not None:
conv.bias.data.fill_(0)
return conv
class ResBlock(nn.Module):
def __init__(self, features: 'int'):
super(ResBlock, self).__init__()
self.conv1 = conv3x3(features, features)
self.relu1 = nn.ReLU()
self.conv2 = conv3x3(features, features)
self.relu2 = nn.ReLU()
    def forward(self, activated_input: 'torch.Tensor',
            residual_input: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv1(activated_input)
x = self.relu1(x)
x = self.conv2(x)
residual_output = x + residual_input
activated_output = self.relu2(residual_output)
return activated_output, residual_output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
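# A minimal usage sketch (added for illustration, not from the original repo):
# it checks that ResBlock returns both relu(conv2(relu(conv1(a))) + r) and the
# pre-activation residual, matching the forward pass above.
def _resblock_smoke_test():
    torch.manual_seed(0)
    block = ResBlock(features=4)
    a = torch.rand(4, 4, 4, 4)
    r = torch.rand(4, 4, 4, 4)
    activated, residual = block(a, r)
    expected = block.conv2(block.relu1(block.conv1(a))) + r
    assert torch.allclose(residual, expected)
    assert torch.allclose(activated, torch.relu(expected))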
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf3, primals_5, primals_6, buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
del primals_6
return buf4, buf3, primals_1, primals_3, primals_4, buf1, buf5
def conv3x3(in_channels: 'int', out_channels: 'int', stride: 'int'=1,
        padding: 'int'=1) -> nn.Module:
    conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
        stride=stride, padding=padding, bias=True)
nn.init.xavier_normal_(conv.weight)
if conv.bias is not None:
conv.bias.data.fill_(0)
return conv
class ResBlockNew(nn.Module):
def __init__(self, features: 'int'):
super(ResBlockNew, self).__init__()
self.conv1 = conv3x3(features, features)
self.relu1 = nn.ReLU()
self.conv2 = conv3x3(features, features)
self.relu2 = nn.ReLU()
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
 | mdornseif/fastface | ResBlock | false | 16029 | [
"MIT"
] | 72 | 72772db1fae4af17e829cd5479c4848fe5eb8948 | https://github.com/mdornseif/fastface/tree/72772db1fae4af17e829cd5479c4848fe5eb8948 |
AffineGridGen | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ps/cpsodef265fez7yctn2j3nynklbbmfuxn7bi57zsgzlanh73sk5f.py
# Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator]
# Source node to ATen node mapping:
# affine_grid => add_3, constant_pad_nd_2, full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %constant_pad_nd_2 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%full_default, [2, 0], 0.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %constant_pad_nd_2), kwargs = {})
triton_poi_fused_affine_grid_generator_0 = async_compile.triton('triton_poi_fused_affine_grid_generator_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_affine_grid_generator_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_affine_grid_generator_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 172800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 240
x2 = (xindex // 720)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x1
tmp4 = tmp3.to(tl.float32)
tmp5 = 120.0
tmp6 = tmp4 < tmp5
tmp7 = 0.008333333333333333
tmp8 = tmp4 * tmp7
tmp9 = -0.9958333333333333
tmp10 = tmp8 + tmp9
tmp11 = 239 + ((-1)*x1)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp7
tmp14 = 0.9958333333333333
tmp15 = tmp14 - tmp13
tmp16 = tl.where(tmp6, tmp10, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp2, tmp16, tmp17)
tmp19 = (-1) + x0
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tmp19 >= tmp20
tmp22 = tmp19 < tmp1
tmp23 = tmp21 & tmp22
tmp24 = x2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 < tmp5
tmp27 = tmp25 * tmp7
tmp28 = tmp27 + tmp9
tmp29 = 239 + ((-1)*x2)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp30 * tmp7
tmp32 = tmp14 - tmp31
tmp33 = tl.where(tmp26, tmp28, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp23, tmp33, tmp34)
tmp36 = tmp18 + tmp35
tmp37 = (-2) + x0
tmp38 = tmp37 >= tmp20
tmp39 = 1.0
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp38, tmp39, tmp40)
tmp42 = tmp36 + tmp41
tl.store(out_ptr0 + (x5), tmp42, xmask)
''', device_str='cuda')
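# Added note (editor): the constants above are the align_corners=False
# pixel-centre coordinates for a 240-wide axis: step 0.008333... == 2/240 and
# end points +/-0.9958333... == +/-(239/240); the branch at 120.0 evaluates
# the second half of the axis from the far end, presumably to keep the grid
# numerically symmetric about zero in fp32.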
# kernel path: runs/run_shard_0/inductor_cache/72/c72ko7bwrnxcz33klyfokecvp4hnpe3scztiorsnr3founy7q6m3.py
# Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator]
# Source node to ATen node mapping:
# affine_grid => mul_4, sum_1
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %unsqueeze), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-2]), kwargs = {})
triton_poi_fused_affine_grid_generator_1 = async_compile.triton('triton_poi_fused_affine_grid_generator_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_affine_grid_generator_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_affine_grid_generator_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 2) % 57600
x0 = xindex % 2
x2 = (xindex // 115200)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (3*x1), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((3*x0) + (6*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (3*x1)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (3*x0) + (6*x2)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (3*x1)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (3*x0) + (6*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 3), (6, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((240, 240, 3), (720, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator]
stream0 = get_raw_stream(0)
triton_poi_fused_affine_grid_generator_0.run(buf1, 172800, grid=grid(172800), stream=stream0)
buf2 = empty_strided_cuda((4, 57600, 2), (115200, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator]
triton_poi_fused_affine_grid_generator_1.run(buf1, arg0_1, buf2, 460800, grid=grid(460800), stream=stream0)
del arg0_1
del buf1
return (reinterpret_tensor(buf2, (4, 240, 240, 2), (115200, 480, 2, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 2, 3), (6, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.nn
from torch.nn.modules.module import Module
class AffineGridGen(Module):
def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True):
super(AffineGridGen, self).__init__()
self.out_h = out_h
self.out_w = out_w
self.out_ch = out_ch
def forward(self, theta):
b = theta.size()[0]
if not theta.size() == (b, 2, 3):
theta = theta.view(-1, 2, 3)
theta = theta.contiguous()
batch_size = theta.size()[0]
        out_size = torch.Size((batch_size, self.out_ch, self.out_h,
            self.out_w))
return F.affine_grid(theta, out_size)
def get_inputs():
return [torch.rand([4, 2, 3])]
def get_init_inputs():
return [[], {}]
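# A hedged sketch (added, not part of the repo): with an identity theta the
# generated grid is symmetric about zero in normalised [-1, 1] coordinates,
# so grid_sample over it would reproduce the input image.
def _identity_grid_check():
    theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
    grid = AffineGridGen(out_h=240, out_w=240)(theta)
    assert grid.shape == (1, 240, 240, 2)
    assert torch.allclose(grid.mean(), torch.tensor(0.0), atol=1e-5)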
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_affine_grid_generator_0(out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 172800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 240
x2 = xindex // 720
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x1
tmp4 = tmp3.to(tl.float32)
tmp5 = 120.0
tmp6 = tmp4 < tmp5
tmp7 = 0.008333333333333333
tmp8 = tmp4 * tmp7
tmp9 = -0.9958333333333333
tmp10 = tmp8 + tmp9
tmp11 = 239 + -1 * x1
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp7
tmp14 = 0.9958333333333333
tmp15 = tmp14 - tmp13
tmp16 = tl.where(tmp6, tmp10, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp2, tmp16, tmp17)
tmp19 = -1 + x0
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tmp19 >= tmp20
tmp22 = tmp19 < tmp1
tmp23 = tmp21 & tmp22
tmp24 = x2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 < tmp5
tmp27 = tmp25 * tmp7
tmp28 = tmp27 + tmp9
tmp29 = 239 + -1 * x2
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp30 * tmp7
tmp32 = tmp14 - tmp31
tmp33 = tl.where(tmp26, tmp28, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp23, tmp33, tmp34)
tmp36 = tmp18 + tmp35
tmp37 = -2 + x0
tmp38 = tmp37 >= tmp20
tmp39 = 1.0
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp38, tmp39, tmp40)
tmp42 = tmp36 + tmp41
tl.store(out_ptr0 + x5, tmp42, xmask)
@triton.jit
def triton_poi_fused_affine_grid_generator_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 2 % 57600
x0 = xindex % 2
x2 = xindex // 115200
x3 = xindex
tmp0 = tl.load(in_ptr0 + 3 * x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 3 * x1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 3 * x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tl.store(out_ptr0 + x3, tmp10, None)
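# Added note (editor): each output element above is the dot product of one
# homogeneous base-grid row (x, y, 1) from in_ptr0 with one row of the
# per-batch 2x3 theta from in_ptr1 (x0 picks the row, x2 the batch) -- the
# batched (57600, 3) @ (3, 2) matmul behind F.affine_grid, unrolled into
# tmp0..tmp10.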
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 3), (6, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((240, 240, 3), (720, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_affine_grid_generator_0[grid(172800)](buf1, 172800,
XBLOCK=512, num_warps=8, num_stages=1)
buf2 = empty_strided_cuda((4, 57600, 2), (115200, 2, 1), torch.float32)
triton_poi_fused_affine_grid_generator_1[grid(460800)](buf1, arg0_1,
buf2, 460800, XBLOCK=512, num_warps=8, num_stages=1)
del arg0_1
del buf1
return reinterpret_tensor(buf2, (4, 240, 240, 2), (115200, 480, 2, 1), 0),
class AffineGridGenNew(Module):
def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True):
super(AffineGridGenNew, self).__init__()
self.out_h = out_h
self.out_w = out_w
self.out_ch = out_ch
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | mcimpoi/ncnet | AffineGridGen | false | 16030 | [
"MIT"
] | 159 | d801df77154bce9e5653090273aacb0e588fa4ea | https://github.com/mcimpoi/ncnet/tree/d801df77154bce9e5653090273aacb0e588fa4ea |
L2ConstrainedLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/au/cau37fsbdwwaul2hbsoeaql7ksnelycdmmavhnfvtizwykwl7ppp.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, l2, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# l2 => sqrt
# pow_1 => pow_1
# sum_1 => sum_1
# truediv => div
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 16), kwargs = {})
triton_per_fused_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_per_fused_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = libdevice.sqrt(tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 16.0
tmp8 = tmp6 * tmp7
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, l2, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_pow_sqrt_sum_0.run(arg0_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class L2ConstrainedLayer(nn.Module):
def __init__(self, alpha=16):
super().__init__()
self.alpha = alpha
def forward(self, x):
l2 = torch.sqrt((x ** 2).sum())
x = self.alpha * (x / l2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
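# A short check (added for illustration; an assumption, not repo code): the
# layer rescales the whole tensor so its flattened L2 norm equals alpha,
# since ||alpha * x / ||x||_2||_2 == alpha for any nonzero x.
def _l2_constraint_check():
    layer = L2ConstrainedLayer(alpha=16)
    y = layer(torch.rand(4, 4, 4, 4))
    assert torch.allclose(torch.sqrt((y ** 2).sum()), torch.tensor(16.0),
        rtol=1e-5)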
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = libdevice.sqrt(tmp4)
tmp6 = tmp0 / tmp5
tmp7 = 16.0
tmp8 = tmp6 * tmp7
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp8, None)
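# Added note (editor): with grid(1) below, a single program whose RBLOCK of
# 256 spans the whole 4x4x4x4 input performs the global sum-of-squares
# reduction and the alpha-rescale in one persistent-reduction pass, so no
# intermediate buffer for the scalar norm is materialised.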
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_pow_sqrt_sum_0[grid(1)](arg0_1, buf1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class L2ConstrainedLayerNew(nn.Module):
def __init__(self, alpha=16):
super().__init__()
self.alpha = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mgoldchild/metric_learning | L2ConstrainedLayer | false | 16,031 | [
"MIT"
] | 58 | 97731bd0922b42df470ec6be34e1138bbcca5fb7 | https://github.com/mgoldchild/metric_learning/tree/97731bd0922b42df470ec6be34e1138bbcca5fb7 |
LogCoshLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zx/czxmxzopctcgackydle44efrurdnxzu5wrykeqxx3yz7yy6vfch5.py
# Topologically Sorted Source Nodes: [loss, add, cosh, log, mean], Original ATen: [aten.sub, aten.add, aten.cosh, aten.log, aten.mean]
# Source node to ATen node mapping:
# add => add
# cosh => cosh
# log => log
# loss => sub
# mean => mean
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-12), kwargs = {})
# %cosh : [num_users=1] = call_function[target=torch.ops.aten.cosh.default](args = (%add,), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%cosh,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {})
triton_per_fused_add_cosh_log_mean_sub_0 = async_compile.triton('triton_per_fused_add_cosh_log_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cosh_log_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_cosh_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = 1e-12
tmp4 = tmp2 + tmp3
tmp5 = libdevice.cosh(tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, add, cosh, log, mean], Original ATen: [aten.sub, aten.add, aten.cosh, aten.log, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_cosh_log_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LogCoshLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, true, pred):
loss = true - pred
return torch.mean(torch.log(torch.cosh(loss + 1e-12)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
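# A hedged numerical check (added; not from the repo): log(cosh(d)) behaves
# like d**2 / 2 for small residuals and like |d| - log(2) for large ones,
# which is what makes log-cosh a smooth, outlier-robust relative of MSE.
def _logcosh_asymptotics():
    import math
    loss = LogCoshLoss()
    small = loss(torch.zeros(8, dtype=torch.float64),
        torch.full((8,), 1e-3, dtype=torch.float64))
    assert abs(small.item() - 0.5e-6) < 1e-12
    large = loss(torch.zeros(8, dtype=torch.float64),
        torch.full((8,), 10.0, dtype=torch.float64))
    assert abs(large.item() - (10.0 - math.log(2.0))) < 1e-8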
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_cosh_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 1e-12
tmp4 = tmp2 + tmp3
tmp5 = libdevice.cosh(tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_cosh_log_mean_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogCoshLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | mfredriksz/semanticGAN_code | LogCoshLoss | false | 16033 | [
"BSD-2-Clause",
"MIT"
] | 107 | c6e7b490086afd8a7593e2892452295555910494 | https://github.com/mfredriksz/semanticGAN_code/tree/c6e7b490086afd8a7593e2892452295555910494 |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/w3/cw3egt7ajdde7mbqzrdxs4mdcaxj75b4l3brz5gbsf4yd73gbids.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gy/cgyif5zqnxycmk2s3w5gvkfiwexmu3rndp5va76w52tlidlcyfyd.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 3072), (3072, 1))
assert_size_stride(primals_2, (512, 3072), (3072, 1))
assert_size_stride(primals_3, (512, ), (1, ))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (10, 512), (512, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (3072, 512), (1, 3072), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 2048, grid=grid(2048), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 10), (1, 512), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 10), (10, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf5, primals_7, buf6, 40, grid=grid(40), stream=stream0)
del primals_7
return (buf5, primals_1, buf1, buf3, buf6, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3072), (3072, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, 3072), (3072, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, num_class=10):
super(MLP, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, num_class)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(-1, 32 * 32 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 3072])]
def get_init_inputs():
return [[], {}]
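# A minimal smoke test (added; assumption, not repo code): view(-1, 32*32*3)
# lets the MLP consume CIFAR-shaped (N, 3, 32, 32) batches, and the final
# ReLU in forward makes every output non-negative.
def _mlp_smoke_test():
    model = MLP(num_class=10)
    out = model(torch.rand(4, 3, 32, 32))
    assert out.shape == (4, 10)
    assert (out >= 0).all()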
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 3072), (3072, 1))
assert_size_stride(primals_2, (512, 3072), (3072, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (10, 512), (512, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (3072,
512), (1, 3072), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 512), (
1, 512), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(2048)](buf3, primals_5, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 10), (1,
512), 0), out=buf4)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 10), (10, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(40)](buf5,
primals_7, buf6, 40, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return buf5, primals_1, buf1, buf3, buf6, primals_6, primals_4
class MLPNew(nn.Module):
def __init__(self, num_class=10):
super(MLPNew, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, num_class)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
 | mattkelleher/Nasty-Teacher | MLP | false | 16034 | [
"MIT"
] | 59 | 7cca6e41aca10dcceeb215fa15107baae91e0140 | https://github.com/mattkelleher/Nasty-Teacher/tree/7cca6e41aca10dcceeb215fa15107baae91e0140 |
SoftmaxLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jh/cjhafiazvhnqzahvgpeyzzxgeb5atp7ebiv4plitnblial63qxb6.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py
# Topologically Sorted Source Nodes: [l], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# l => div_1, exp, log, mul, neg, sub_1, sum_1, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
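# Note (added): 0.015625 == 1/64, the CrossEntropyLoss 'mean' reduction over
# the N*H*W = 4*4*4 = 64 positions; dim 1 (size 4) is the class dimension.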
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [l], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SoftmaxLoss(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, pred, true):
logits = pred / self.tau
l = self.ce_loss(logits, true)
return l
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
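# --- Illustrative usage sketch (added; not part of the original source).
# Assumes a PyTorch build where CrossEntropyLoss accepts soft probability
# targets (torch >= 1.10); with 4D inputs the class dimension is dim 1.
if __name__ == "__main__":
    loss_fn = SoftmaxLoss(tau=2.0)  # tau=2.0 is an arbitrary demo temperature
    pred, true = get_inputs()
    print(loss_fn(pred, true))  # scalar loss tensor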
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class SoftmaxLossNew(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mfredriksz/semanticGAN_code | SoftmaxLoss | false | 16,035 | [
"BSD-2-Clause",
"MIT"
] | 107 | c6e7b490086afd8a7593e2892452295555910494 | https://github.com/mfredriksz/semanticGAN_code/tree/c6e7b490086afd8a7593e2892452295555910494 |
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fo/cfobumeerlxxxdwow6t6kkxoz26gxtnl4w5nr5d4kbmcxfu3a7tz.py
# Topologically Sorted Source Nodes: [AxW, AxW_1, AxW_2], Original ATen: [aten.add, aten.div, aten.leaky_relu]
# Source node to ATen node mapping:
# AxW => add
# AxW_1 => div
# AxW_2 => gt, mul, where
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 4), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%div, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %div, %mul), kwargs = {})
triton_poi_fused_add_div_leaky_relu_0 = async_compile.triton('triton_poi_fused_add_div_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_leaky_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 + tmp4
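# Note (added): fused AxW = (fc1(adj @ x) + fc1(x)), both sharing bias tmp1,
# then scaled by 1/L (L = x.size(1) = 4, so tmp6 = 0.25) and passed through
# LeakyReLU with negative slope 0.01.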
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.01
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [AxW, AxW_1, AxW_2], Original ATen: [aten.add, aten.div, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_leaky_relu_0.run(buf1, primals_4, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf1
del buf2
del primals_4
return (buf4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class GCN(nn.Module):
def __init__(self, cfg):
super(GCN, self).__init__()
self.num_layers = cfg.num_layers
self.input_size = cfg.input_size
self.hidden_size = cfg.hidden_size
self.dropout = cfg.dropout
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.fcs = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.num_layers - 1)])
self.dropout = nn.Dropout(self.dropout)
def forward(self, x, adj):
L = x.size(1)
AxW = self.fc1(torch.bmm(adj, x)) + self.fc1(x)
AxW = AxW / L
AxW = F.leaky_relu(AxW)
AxW = self.dropout(AxW)
for fc in self.fcs:
AxW = fc(torch.bmm(adj, AxW)) + fc(AxW)
AxW = AxW / L
AxW = F.leaky_relu(AxW)
AxW = self.dropout(AxW)
return AxW
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(num_layers=1, input_size=4,
hidden_size=4, dropout=0.5)}]
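# --- Illustrative usage sketch (added; not part of the original source).
# A plain SimpleNamespace stands in for the _paritybench_helpers mock config.
if __name__ == "__main__":
    from types import SimpleNamespace
    cfg = SimpleNamespace(num_layers=1, input_size=4, hidden_size=4, dropout=0.5)
    model = GCN(cfg)
    x, adj = get_inputs()
    print(model(x, adj).shape)  # torch.Size([4, 4, 4])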
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_leaky_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.01
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_leaky_relu_0[grid(64)](buf1, primals_4,
buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_4
return buf4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3
class GCNNew(nn.Module):
def __init__(self, cfg):
super(GCNNew, self).__init__()
self.num_layers = cfg.num_layers
self.input_size = cfg.input_size
self.hidden_size = cfg.hidden_size
self.dropout = cfg.dropout
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.fcs = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.num_layers - 1)])
self.dropout = nn.Dropout(self.dropout)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| mengtinglll/deepke | GCN | false | 16,036 | [
"Apache-2.0"
] | 173 | da1649865c496317b45f0b26e9ea599c9f509ed0 | https://github.com/mengtinglll/deepke/tree/da1649865c496317b45f0b26e9ea599c9f509ed0 |
CDCM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=5] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bc/cbceu7uft4bvnnmuhlvfkeuoro2yg2e3qvmonxm6h5vmskmmqqai.py
# Topologically Sorted Source Nodes: [add, add_1, add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %convolution_4), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(5, 5), dilation=(5, 5), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(7, 7), dilation=(7, 7), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(9, 9), dilation=(9, 9), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1), padding=(11, 11), dilation=(11, 11), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, add_1, add_2], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf7, buf4, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf4
del buf5
del buf6
return (buf7, primals_2, primals_4, primals_5, primals_6, primals_7, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CDCM(nn.Module):
"""
Compact Dilation Convolution based Module
"""
def __init__(self, in_channels, out_channels):
super(CDCM, self).__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
padding=0)
self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=5, padding=5, bias=False)
self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=7, padding=7, bias=False)
self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=9, padding=9, bias=False)
self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=11, padding=11, bias=False)
nn.init.constant_(self.conv1.bias, 0)
def forward(self, x):
x = self.relu1(x)
x = self.conv1(x)
x1 = self.conv2_1(x)
x2 = self.conv2_2(x)
x3 = self.conv2_3(x)
x4 = self.conv2_4(x)
return x1 + x2 + x3 + x4
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
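# --- Illustrative usage sketch (added; not part of the original source).
# Each dilated 3x3 branch uses padding == dilation, so the 4x4 spatial size
# is preserved and the four branch outputs can be summed directly.
if __name__ == "__main__":
    m = CDCM(in_channels=4, out_channels=4)
    y = m(*get_inputs())
    print(y.shape)  # torch.Size([4, 4, 4, 4])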
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(5, 5), dilation=(5, 5), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(7, 7), dilation=(7, 7), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(9, 9), dilation=(9, 9), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(11, 11), dilation=(11, 11), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf3
del buf3
triton_poi_fused_add_2[grid(256)](buf7, buf4, buf5, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del buf5
del buf6
return (buf7, primals_2, primals_4, primals_5, primals_6, primals_7,
buf0, buf2)
class CDCMNew(nn.Module):
"""
Compact Dilation Convolution based Module
"""
def __init__(self, in_channels, out_channels):
super(CDCMNew, self).__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
padding=0)
self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=5, padding=5, bias=False)
self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=7, padding=7, bias=False)
self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=9, padding=9, bias=False)
self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
dilation=11, padding=11, bias=False)
nn.init.constant_(self.conv1.bias, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2_1.weight
primals_5 = self.conv2_2.weight
primals_6 = self.conv2_3.weight
primals_7 = self.conv2_4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| mgpadalkar/pidinet | CDCM | false | 16,037 | [
"MIT"
] | 137 | 781924fe30469cdc64f63ce6666a3e1f5b4e576f | https://github.com/mgpadalkar/pidinet/tree/781924fe30469cdc64f63ce6666a3e1f5b4e576f |
PDCBlock_converted | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# y_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/43/c43iah2ujzdzlzvirc5zcusvrhdz3liemhgusdpro5bcmzekdxpa.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# y_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_2), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_2, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PDCBlock_converted(nn.Module):
"""
CPDC, APDC can be converted to vanilla 3x3 convolution
RPDC can be converted to vanilla 5x5 convolution
"""
def __init__(self, pdc, inplane, ouplane, stride=1):
super(PDCBlock_converted, self).__init__()
self.stride = stride
if self.stride > 1:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1,
padding=0)
if pdc == 'rd':
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding=2, groups=inplane, bias=False)
else:
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0,
bias=False)
def forward(self, x):
if self.stride > 1:
x = self.pool(x)
y = self.conv1(x)
y = self.relu2(y)
y = self.conv2(y)
if self.stride > 1:
x = self.shortcut(x)
y = y + x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pdc': 4, 'inplane': 4, 'ouplane': 4}]
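# --- Illustrative usage sketch (added; not part of the original source).
# pdc is only compared against 'rd', so any other value (the sample init
# passes 4) selects the 3x3 depthwise branch; 'cv' below is an arbitrary tag.
if __name__ == "__main__":
    blk = PDCBlock_converted(pdc='cv', inplane=4, ouplane=4)
    y = blk(*get_inputs())
    print(y.shape)  # torch.Size([4, 4, 4, 4])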
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
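    # Note (added): residual add from the eager forward,
    # y = conv2(relu(conv1(x))) + x.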
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1
class PDCBlock_convertedNew(nn.Module):
"""
CPDC, APDC can be converted to vanilla 3x3 convolution
RPDC can be converted to vanilla 5x5 convolution
"""
def __init__(self, pdc, inplane, ouplane, stride=1):
super(PDCBlock_convertedNew, self).__init__()
self.stride = stride
if self.stride > 1:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1,
padding=0)
if pdc == 'rd':
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding=2, groups=inplane, bias=False)
else:
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0,
bias=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mgpadalkar/pidinet | PDCBlock_converted | false | 16,038 | [
"MIT"
] | 137 | 781924fe30469cdc64f63ce6666a3e1f5b4e576f | https://github.com/mgpadalkar/pidinet/tree/781924fe30469cdc64f63ce6666a3e1f5b4e576f |
SplitCosineLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
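# Note (added): tmp11 is the sum of squares over dim 1 (4 channels); the
# sqrt is clamped at 1e-12 below, matching F.normalize's default eps.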
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ce/cceitkb57gvjhg5ldpl5r7iv6yefxb7tczymuq3ptw4ifiklh2os.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize_1 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
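# Editorial note: the same normalisation pattern, specialised to the (4, 4)
# weight matrix -- each row (x1 = index // 4) is divided by
# max(||row||_2, 1e-12), matching F.normalize(weight, p=2, dim=1).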
# kernel path: runs/run_shard_0/inductor_cache/z6/cz6vu634zwdblkopcpp7pq3pcympjsdtdsbziiv2onba4ieimu56.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.cat, aten.mul]
# Source node to ATen node mapping:
# out_2 => cat
# out_3 => mul
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %view_3], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %cat), kwargs = {})
triton_poi_fused_cat_mul_2 = async_compile.triton('triton_poi_fused_cat_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp11 = tl.load(in_ptr2 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp13 = tmp12 * tmp10
tl.store(out_ptr0 + (x3), tmp10, xmask)
tl.store(out_ptr1 + (x3), tmp13, xmask)
''', device_str='cuda')
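# Editorial note: x1 = (index // 16) % 8 is the output channel of the
# concatenated (4, 8, 4, 4) result; channels 0-3 are read from in_ptr0 (fc1's
# output) and channels 4-7 from in_ptr1 (fc2's), selected via the tmp4/tmp6
# masks. out_ptr0 receives the raw concatenation (kept for the backward pass)
# and out_ptr1 the sigma-scaled copy, fusing sigma * torch.cat((out1, out2), 1)
# into a single pass.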
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [normalize_3], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_3, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf4)
del buf3
buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.cat, aten.mul]
triton_poi_fused_cat_mul_2.run(buf2, buf4, primals_4, buf5, buf6, 512, grid=grid(512), stream=stream0)
del buf2
del buf4
return (buf6, primals_2, primals_3, primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.nn.modules.module import Module
class CosineLinear(Module):
def __init__(self, in_features, out_features, sigma=True):
super(CosineLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if sigma:
self.sigma = Parameter(torch.Tensor(1))
else:
self.register_parameter('sigma', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.sigma is not None:
self.sigma.data.fill_(1)
def forward(self, input):
        out = F.linear(F.normalize(input, p=2, dim=1),
                       F.normalize(self.weight, p=2, dim=1))
if self.sigma is not None:
out = self.sigma * out
return out
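# A minimal equivalence check for the class above (editorial sketch; the
# helper name `_cosine_linear_demo` is illustrative, not part of the repo):
# the layer's output is the cosine similarity between each input row and each
# weight row, scaled by the learned sigma (initialised to 1).
def _cosine_linear_demo():
    layer = CosineLinear(4, 3)
    x = torch.randn(2, 4)
    cos = F.cosine_similarity(x.unsqueeze(1), layer.weight.unsqueeze(0), dim=-1)
    assert torch.allclose(layer(x), layer.sigma * cos, atol=1e-5)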
class SplitCosineLinear(Module):
def __init__(self, in_features, out_features1, out_features2, sigma=True):
super(SplitCosineLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features1 + out_features2
self.fc1 = CosineLinear(in_features, out_features1, False)
self.fc2 = CosineLinear(in_features, out_features2, False)
if sigma:
self.sigma = Parameter(torch.Tensor(1))
self.sigma.data.fill_(1)
else:
self.register_parameter('sigma', None)
def forward(self, x):
out1 = self.fc1(x)
out2 = self.fc2(x)
out = torch.cat((out1, out2), dim=1)
if self.sigma is not None:
out = self.sigma * out
return out
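# Editorial sketch (helper name illustrative): the split head is equivalent to
# a single CosineLinear whose weight stacks the two sub-weights along dim=0,
# since the row-wise normalisation is independent per row.
def _split_cosine_demo():
    split = SplitCosineLinear(4, 2, 3)
    full = CosineLinear(4, 5)
    full.weight.data.copy_(
        torch.cat([split.fc1.weight.data, split.fc2.weight.data], dim=0))
    x = torch.randn(2, 4)
    assert torch.allclose(split(x), full(x), atol=1e-6)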
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features1': 4, 'out_features2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp11 = tl.load(in_ptr2 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp13 = tmp12 * tmp10
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf1
del buf1
triton_poi_fused_div_1[grid(16)](primals_3, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf4)
del buf3
buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_mul_2[grid(512)](buf2, buf4, primals_4, buf5,
buf6, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del buf4
return buf6, primals_2, primals_3, primals_4, reinterpret_tensor(buf0,
(64, 4), (4, 1), 0), buf5
class CosineLinear(Module):
def __init__(self, in_features, out_features, sigma=True):
super(CosineLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if sigma:
self.sigma = Parameter(torch.Tensor(1))
else:
self.register_parameter('sigma', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.sigma is not None:
self.sigma.data.fill_(1)
def forward(self, input):
        out = F.linear(F.normalize(input, p=2, dim=1),
                       F.normalize(self.weight, p=2, dim=1))
if self.sigma is not None:
out = self.sigma * out
return out
class SplitCosineLinearNew(Module):
def __init__(self, in_features, out_features1, out_features2, sigma=True):
super(SplitCosineLinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features1 + out_features2
self.fc1 = CosineLinear(in_features, out_features1, False)
self.fc2 = CosineLinear(in_features, out_features2, False)
if sigma:
self.sigma = Parameter(torch.Tensor(1))
self.sigma.data.fill_(1)
else:
self.register_parameter('sigma', None)
def forward(self, input_0):
primals_4 = self.sigma
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
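# Editorial usage sketch for the compiled module (illustrative; requires a
# CUDA device and mirrors the reference module's test inputs):
#
#   m = SplitCosineLinearNew(4, 4, 4).cuda()
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'))  # y.shape == (4, 8, 4, 4)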
| mhd-medfa/class-incremental-learning | SplitCosineLinear | false | 16,039 | ["MIT"] | 241 | c7c0a217d07b285f215672b3021beee52d4ef74f | https://github.com/mhd-medfa/class-incremental-learning/tree/c7c0a217d07b285f215672b3021beee52d4ef74f |
TreeLSTM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ws/cwsyrllv2dhlbraqtsqyfoykchdot6g3id5jpn6ekpusd2gqgfjh.py
# Topologically Sorted Source Nodes: [tanh, sigmoid, mul, sigmoid_1, mul_1, add, sigmoid_2, mul_2, c, sigmoid_3, tanh_1, h], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
# add => add_1
# c => add_2
# h => mul_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid_2
# sigmoid_3 => sigmoid_3
# tanh => tanh
# tanh_1 => tanh_1
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_5,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_11,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {})
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_17,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %select_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sigmoid_2 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_23,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %select_3), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_29,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_2), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %sub_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (20*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + (20*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (8 + x0 + (20*x1)), xmask)
tmp18 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr4 + (16 + x2), xmask)
tmp28 = tl.load(in_ptr0 + (12 + x0 + (20*x1)), xmask)
tmp29 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr5 + (4 + x0), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (16 + x0 + (20*x1)), xmask)
tmp45 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp16 = tmp7 * tmp15
tmp19 = tmp17 + tmp18
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.sigmoid(tmp23)
tmp26 = tmp24 * tmp25
tmp27 = tmp16 + tmp26
tmp30 = tmp28 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = tmp30 + tmp33
tmp35 = tl.sigmoid(tmp34)
tmp37 = tmp35 * tmp36
tmp38 = tmp27 + tmp37
tmp39 = 1.0
tmp40 = tmp39 - tmp35
tmp41 = tmp35 * tmp40
tmp42 = tmp39 - tmp24
tmp43 = tmp24 * tmp42
tmp46 = tmp44 + tmp45
tmp49 = tmp47 + tmp48
tmp50 = tmp46 + tmp49
tmp51 = tl.sigmoid(tmp50)
tmp52 = libdevice.tanh(tmp38)
tmp53 = tmp51 * tmp52
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp38, xmask)
tl.store(out_ptr3 + (x2), tmp41, xmask)
tl.store(out_ptr4 + (x2), tmp43, xmask)
tl.store(out_ptr5 + (x2), tmp51, xmask)
tl.store(out_ptr6 + (x2), tmp53, xmask)
''', device_str='cuda')
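# Editorial note: the kernel above evaluates all five gate chunks of
# lstm_in = left(h_l) + right(h_r) in one pass. Offsets 0/4/8/12/16 into the
# 20-wide rows give (a, i, f1, f2, o), from which it emits
#   c = tanh(a)*sig(i) + sig(f1)*c_l + sig(f2)*c_r   -> out_ptr2
#   h = sig(o)*tanh(c)                               -> out_ptr6
# together with the sig*(1 - sig) products of the two forget gates
# (out_ptr3/out_ptr4), which the autograd backward pass reuses.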
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (20, 4), (4, 1))
assert_size_stride(primals_3, (20, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (20, 4), (4, 1))
assert_size_stride(primals_6, (20, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 20), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((1, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (1, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 20), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, sigmoid, mul, sigmoid_1, mul_1, add, sigmoid_2, mul_2, c, sigmoid_3, tanh_1, h], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0.run(buf0, primals_3, buf1, primals_6, primals_1, primals_4, buf2, buf3, buf4, buf7, buf8, buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf0
del buf1
del primals_3
del primals_6
return (buf6, buf4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (1, 4), (4, 1), 0), buf2, buf3, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, ), (1, ), 4), buf4, buf5, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class TreeLSTM(nn.Module):
def __init__(self, num_units):
super(TreeLSTM, self).__init__()
self.num_units = num_units
self.left = nn.Linear(num_units, 5 * num_units)
self.right = nn.Linear(num_units, 5 * num_units)
def forward(self, left_in, right_in):
lstm_in = self.left(left_in[0])
lstm_in += self.right(right_in[0])
a, i, f1, f2, o = lstm_in.chunk(5, 1)
        c = (a.tanh() * i.sigmoid() + f1.sigmoid() * left_in[1] +
             f2.sigmoid() * right_in[1])
h = o.sigmoid() * c.tanh()
return h, c
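# Editorial usage sketch (helper name illustrative): each child contributes an
# (h, c) pair and the cell fuses them into the parent's (h, c).
def _tree_lstm_demo():
    cell = TreeLSTM(4)
    left = torch.randn(2, 4), torch.randn(2, 4)
    right = torch.randn(2, 4), torch.randn(2, 4)
    h, c = cell(left, right)
    assert h.shape == c.shape == (2, 4)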
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_units': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1,
    out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + 20 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (8 + x0 + 20 * x1), xmask)
tmp18 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr4 + (16 + x2), xmask)
tmp28 = tl.load(in_ptr0 + (12 + x0 + 20 * x1), xmask)
tmp29 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr5 + (4 + x0), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (16 + x0 + 20 * x1), xmask)
tmp45 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp16 = tmp7 * tmp15
tmp19 = tmp17 + tmp18
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.sigmoid(tmp23)
tmp26 = tmp24 * tmp25
tmp27 = tmp16 + tmp26
tmp30 = tmp28 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = tmp30 + tmp33
tmp35 = tl.sigmoid(tmp34)
tmp37 = tmp35 * tmp36
tmp38 = tmp27 + tmp37
tmp39 = 1.0
tmp40 = tmp39 - tmp35
tmp41 = tmp35 * tmp40
tmp42 = tmp39 - tmp24
tmp43 = tmp24 * tmp42
tmp46 = tmp44 + tmp45
tmp49 = tmp47 + tmp48
tmp50 = tmp46 + tmp49
tmp51 = tl.sigmoid(tmp50)
tmp52 = libdevice.tanh(tmp38)
tmp53 = tmp51 * tmp52
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp38, xmask)
tl.store(out_ptr3 + x2, tmp41, xmask)
tl.store(out_ptr4 + x2, tmp43, xmask)
tl.store(out_ptr5 + x2, tmp51, xmask)
tl.store(out_ptr6 + x2, tmp53, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (20, 4), (4, 1))
assert_size_stride(primals_3, (20,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (20, 4), (4, 1))
assert_size_stride(primals_6, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 20), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((1, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (1, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 20), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(16)](
            buf0, primals_3, buf1, primals_6, primals_1, primals_4, buf2,
            buf3, buf4, buf7, buf8, buf5, buf6, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
del buf0
del buf1
del primals_3
del primals_6
    return (buf6, buf4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
        reinterpret_tensor(primals_4, (1, 4), (4, 1), 0), buf2, buf3,
        reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
        reinterpret_tensor(primals_4, (4,), (1,), 4), buf4, buf5, buf7, buf8)
class TreeLSTMNew(nn.Module):
def __init__(self, num_units):
super(TreeLSTMNew, self).__init__()
self.num_units = num_units
self.left = nn.Linear(num_units, 5 * num_units)
self.right = nn.Linear(num_units, 5 * num_units)
def forward(self, input_0, input_1):
primals_2 = self.left.weight
primals_3 = self.left.bias
primals_5 = self.right.weight
primals_6 = self.right.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| mhoangvslev/torchfold | TreeLSTM | false | 16,040 | ["Apache-2.0"] | 160 | 9285c7889f2e1966fb94c4b8a3e91bcd60e40ab2 | https://github.com/mhoangvslev/torchfold/tree/9285c7889f2e1966fb94c4b8a3e91bcd60e40ab2 |
DotProductSimilarity | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/te/ctez6v53ld4k45ykiwq5ndlpz3jrny2atstdxadog7gcnit65abw.py
# Topologically Sorted Source Nodes: [mul, result], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# result => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
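# Editorial note: the kernel above assigns one output element per lane and
# unrolls the length-4 reduction, i.e. it computes the dot product of the two
# trailing 4-vectors -- (tensor_1 * tensor_2).sum(-1) from the reference
# module below, collapsing (4, 4, 4, 4) to (4, 4, 4).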
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, result], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class SimilarityFunction(nn.Module):
"""
A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity
function on the vectors in the last dimension. For example, the tensors might both have shape
`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
tensor of shape `(batch_size, sentence_length)`.
The similarity function could be as simple as a dot product, or it could be a more complex,
parameterized function.
"""
default_implementation = 'dot_product'
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
"""
raise NotImplementedError
class DotProductSimilarity(SimilarityFunction):
"""
This similarity function simply computes the dot product between each pair of vectors, with an
optional scaling to reduce the variance of the output elements.
Parameters
----------
scale_output : ``bool``, optional
        If ``True``, we will scale the output by ``math.sqrt(tensor_1.size(-1))``, to reduce the
variance in the result.
"""
def __init__(self, scale_output: 'bool'=False) ->None:
super(DotProductSimilarity, self).__init__()
self._scale_output = scale_output
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
result = (tensor_1 * tensor_2).sum(dim=-1)
if self._scale_output:
result *= math.sqrt(tensor_1.size(-1))
return result
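# Editorial sketch (helper name illustrative): the unscaled result is exactly
# an einsum contraction over the trailing axis.
def _dot_product_demo():
    sim = DotProductSimilarity()
    a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert torch.allclose(sim(a, b), torch.einsum('...d,...d->...', a, b))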
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SimilarityFunction(nn.Module):
"""
A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity
function on the vectors in the last dimension. For example, the tensors might both have shape
`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
tensor of shape `(batch_size, sentence_length)`.
The similarity function could be as simple as a dot product, or it could be a more complex,
parameterized function.
"""
default_implementation = 'dot_product'
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
"""
raise NotImplementedError
class DotProductSimilarityNew(SimilarityFunction):
"""
This similarity function simply computes the dot product between each pair of vectors, with an
optional scaling to reduce the variance of the output elements.
Parameters
----------
scale_output : ``bool``, optional
        If ``True``, we will scale the output by ``math.sqrt(tensor_1.size(-1))``, to reduce the
variance in the result.
"""
def __init__(self, scale_output: 'bool'=False) ->None:
super(DotProductSimilarityNew, self).__init__()
self._scale_output = scale_output
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| michiyasunaga/GreaseLM | DotProductSimilarity | false | 16,041 | ["MIT"] | 76 | 596aa5047841e3e97730f621a2e4576772733df2 | https://github.com/michiyasunaga/GreaseLM/tree/596aa5047841e3e97730f621a2e4576772733df2 |
CSAM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# y => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# y_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k4/ck42otglczd2qtk6fovmlu2yv7bzgywgiadyjygqbvu4m2rftbjm.py
# Topologically Sorted Source Nodes: [y_3, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# y_3 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
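# Editorial note: the kernel above fuses the final x * sigmoid(y). x0 =
# index % 16 spans H*W and x2 = index // 64 the batch; the load from in_ptr1
# drops the channel index, realising the (N, 1, H, W) -> (N, C, H, W)
# broadcast of the single-channel attention map.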
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (1, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_3, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_2.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, primals_1, primals_2, primals_4, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CSAM(nn.Module):
"""
Compact Spatial Attention Module
"""
def __init__(self, channels):
super(CSAM, self).__init__()
mid_channels = 4
self.relu1 = nn.ReLU()
        self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(mid_channels, 1, kernel_size=3, padding=1,
bias=False)
self.sigmoid = nn.Sigmoid()
nn.init.constant_(self.conv1.bias, 0)
def forward(self, x):
y = self.relu1(x)
y = self.conv1(y)
y = self.conv2(y)
y = self.sigmoid(y)
return x * y
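# Editorial usage sketch (helper name illustrative): the single-channel
# attention map gates every input channel, so the output keeps the input shape.
def _csam_demo():
    m = CSAM(channels=4)
    x = torch.rand(2, 4, 8, 8)
    assert m(x).shape == x.shape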
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
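# Note on triton_poi_fused_mul_sigmoid_2 above: it fuses the final sigmoid and
# the gating multiply into one elementwise pass. in_ptr0 is the original input
# x (256 = 4*4*4*4 elements); in_ptr1 is the 1-channel conv2 output, addressed
# as x0 + 16 * x2 (x0 = spatial offset, x2 = batch index) so one 4x4 attention
# plane is broadcast across all four input channels of each batch element.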
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf4, primals_1, primals_2, primals_4, buf0, buf2, buf3
class CSAMNew(nn.Module):
"""
Compact Spatial Attention Module
"""
def __init__(self, channels):
super(CSAMNew, self).__init__()
mid_channels = 4
self.relu1 = nn.ReLU()
        self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(mid_channels, 1, kernel_size=3, padding=1,
bias=False)
self.sigmoid = nn.Sigmoid()
nn.init.constant_(self.conv1.bias, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
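def _check_csam_parity():
    # Illustrative parity check added for clarity (an assumption, not part of
    # the generated file): the Inductor-compiled path should agree with the
    # equivalent eager-mode ops, given the module's own weights. Requires CUDA.
    torch.manual_seed(0)
    m = CSAMNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = m(x)
    ref = x * m.sigmoid(m.conv2(m.conv1(m.relu1(x))))
    assert torch.allclose(y, ref, atol=1e-6)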
| mgpadalkar/pidinet | CSAM | false | 16,042 | [
"MIT"
] | 137 | 781924fe30469cdc64f63ce6666a3e1f5b4e576f | https://github.com/mgpadalkar/pidinet/tree/781924fe30469cdc64f63ce6666a3e1f5b4e576f |
Conv2dMtl | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rk/crkfmuhs27dkjqsq6pxcyyrqpabpdzmjns36omhnvtv2fncvasqj.py
# Topologically Sorted Source Nodes: [new_weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# new_weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %expand), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sj/csjsifitl6m7djprg2t5r2irfxyh2adlzbqfwpzx5pvayjxrvntq.py
# Topologically Sorted Source Nodes: [new_bias, conv2d], Original ATen: [aten.add, aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# new_bias => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %primals_4), kwargs = {})
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_5, %mul, %add, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/md/cmdyiusa6vngiefvyygua636eoibl42y6wodrxp7oqwo6kirzhtb.py
# Topologically Sorted Source Nodes: [new_bias, conv2d], Original ATen: [aten.add, aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# new_bias => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %primals_4), kwargs = {})
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_5, %mul, %add, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [new_weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [new_bias, conv2d], Original ATen: [aten.add, aten.convolution]
triton_poi_fused_add_convolution_1.run(primals_3, primals_4, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [new_bias, conv2d], Original ATen: [aten.add, aten.convolution]
buf2 = extern_kernels.convolution(primals_5, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [new_bias, conv2d], Original ATen: [aten.add, aten.convolution]
triton_poi_fused_add_convolution_2.run(buf3, buf1, 16, grid=grid(16), stream=stream0)
del buf1
return (buf3, primals_2, primals_5, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
class _ConvNdMtl(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNdMtl, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(in_channels, out_channels //
groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(in_channels,
out_channels // groups, 1, 1))
else:
self.weight = Parameter(torch.Tensor(out_channels, in_channels //
groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(out_channels,
in_channels // groups, 1, 1))
self.weight.requires_grad = False
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
self.bias.requires_grad = False
self.mtl_bias = Parameter(torch.zeros(out_channels))
else:
self.register_parameter('bias', None)
self.register_parameter('mtl_bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.mtl_weight.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.mtl_bias.data.uniform_(0, 0)
def extra_repr(self):
s = (
'{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv2dMtl(_ConvNdMtl):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2dMtl, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias)
def forward(self, input):
new_mtl_weight = self.mtl_weight.expand(self.weight.shape)
new_weight = self.weight.mul(new_mtl_weight)
if self.bias is not None:
new_bias = self.bias + self.mtl_bias
else:
new_bias = None
        return F.conv2d(input, new_weight, new_bias, self.stride,
            self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
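def _example_conv2d_mtl():
    # Hedged sketch added for clarity (not in the original repo): right after
    # construction, mtl_weight is all ones and mtl_bias is all zeros, so
    # Conv2dMtl reduces to a plain convolution with its frozen weight and bias.
    torch.manual_seed(0)
    conv = Conv2dMtl(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    out = conv(x)  # shape (4, 4, 1, 1) for a 4x4 kernel on 4x4 inputs
    ref = F.conv2d(x, conv.weight, conv.bias)
    assert torch.allclose(out, ref)
    return out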
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
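# Note on the three kernels above: triton_poi_fused_mul_0 forms
# new_weight = weight * mtl_weight (x1 = xindex // 16 broadcasts one scalar
# over each 4x4 kernel slice), triton_poi_fused_add_convolution_1 forms
# new_bias = bias + mtl_bias, and triton_poi_fused_add_convolution_2 adds that
# bias in place to the output of the extern convolution call in call() below.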
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_convolution_1[grid(4)](primals_3, primals_4,
buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
del primals_4
buf2 = extern_kernels.convolution(primals_5, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
        triton_poi_fused_add_convolution_2[grid(16)](buf3, buf1, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
del buf1
return buf3, primals_2, primals_5, buf0
class _ConvNdMtl(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNdMtl, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(in_channels, out_channels //
groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(in_channels,
out_channels // groups, 1, 1))
else:
self.weight = Parameter(torch.Tensor(out_channels, in_channels //
groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(out_channels,
in_channels // groups, 1, 1))
self.weight.requires_grad = False
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
self.bias.requires_grad = False
self.mtl_bias = Parameter(torch.zeros(out_channels))
else:
self.register_parameter('bias', None)
self.register_parameter('mtl_bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.mtl_weight.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.mtl_bias.data.uniform_(0, 0)
def extra_repr(self):
s = (
'{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv2dMtlNew(_ConvNdMtl):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2dMtlNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = self.mtl_weight
primals_3 = self.bias
primals_4 = self.mtl_bias
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| mhd-medfa/class-incremental-learning | Conv2dMtl | false | 16,043 | [
"MIT"
] | 241 | c7c0a217d07b285f215672b3021beee52d4ef74f | https://github.com/mhd-medfa/class-incremental-learning/tree/c7c0a217d07b285f215672b3021beee52d4ef74f |
OutputLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7i/c7ie5z5gkvggaw4eftbzxjyfxmnzr2dlnul2lhesydf52egofgee.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%index, %index_1, %index_2, %index_3],), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert(((0 <= tl.broadcast_to(tmp9, [XBLOCK])) & (tl.broadcast_to(tmp9, [XBLOCK]) < 4)) | ~(tmp4 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp9, [XBLOCK]) < 4")
tmp11 = tl.load(in_ptr1 + (tl.broadcast_to(tmp9, [XBLOCK])), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (4 + ((-4) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp6
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tl.device_assert(((0 <= tl.broadcast_to(tmp19, [XBLOCK])) & (tl.broadcast_to(tmp19, [XBLOCK]) < 4)) | ~(tmp15 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp19, [XBLOCK]) < 4")
tmp21 = tl.load(in_ptr1 + (tl.broadcast_to(4 + tmp19, [XBLOCK])), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 12, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr0 + (8 + ((-8) + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp6
tmp28 = tmp26 < 0
tmp29 = tl.where(tmp28, tmp27, tmp26)
tl.device_assert(((0 <= tl.broadcast_to(tmp29, [XBLOCK])) & (tl.broadcast_to(tmp29, [XBLOCK]) < 4)) | ~(tmp25 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp29, [XBLOCK]) < 4")
tmp31 = tl.load(in_ptr1 + (tl.broadcast_to(8 + tmp29, [XBLOCK])), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp0 >= tmp23
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr0 + (12 + ((-12) + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp35 + tmp6
tmp37 = tmp35 < 0
tmp38 = tl.where(tmp37, tmp36, tmp35)
tl.device_assert(((0 <= tl.broadcast_to(tmp38, [XBLOCK])) & (tl.broadcast_to(tmp38, [XBLOCK]) < 4)) | ~(tmp32 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp38, [XBLOCK]) < 4")
tmp40 = tl.load(in_ptr1 + (tl.broadcast_to(12 + tmp38, [XBLOCK])), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + (x0), tmp43, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg1_1, arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class OutputLayer(nn.Module):
def __init__(self, voxel_size=1.0):
super(OutputLayer, self).__init__()
def forward(self, features_list, index_map_list):
out = []
for feat, index_map in zip(features_list, index_map_list):
out.append(feat[index_map])
return torch.cat(out, 0)
def get_inputs():
    return [torch.ones([4, 4], dtype=torch.int64),
        torch.ones([4, 4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
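def _example_output_layer():
    # Illustrative sketch added for clarity (not from the original Open3D-ML
    # source). forward zips its two arguments, gathers feat[index_map] per
    # pair, and concatenates along dim 0; zipping two stacked (4, 4) int64
    # tensors iterates over rows, so the result is a flat (16,) tensor.
    layer = OutputLayer()
    feats = torch.arange(16, dtype=torch.int64).reshape(4, 4)
    idx = torch.zeros(4, 4, dtype=torch.int64)  # each row gathers its 0th entry
    out = layer(feats, idx)
    assert out.shape == (16,)
    return out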
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
    tl.device_assert((0 <= tl.broadcast_to(tmp9, [XBLOCK])) &
        (tl.broadcast_to(tmp9, [XBLOCK]) < 4) | ~(tmp4 & xmask),
        'index out of bounds: 0 <= tl.broadcast_to(tmp9, [XBLOCK]) < 4')
tmp11 = tl.load(in_ptr1 + tl.broadcast_to(tmp9, [XBLOCK]), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (4 + (-4 + x0)), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp6
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
    tl.device_assert((0 <= tl.broadcast_to(tmp19, [XBLOCK])) &
        (tl.broadcast_to(tmp19, [XBLOCK]) < 4) | ~(tmp15 & xmask),
        'index out of bounds: 0 <= tl.broadcast_to(tmp19, [XBLOCK]) < 4')
tmp21 = tl.load(in_ptr1 + tl.broadcast_to(4 + tmp19, [XBLOCK]), tmp15 &
xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 12, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr0 + (8 + (-8 + x0)), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp6
tmp28 = tmp26 < 0
tmp29 = tl.where(tmp28, tmp27, tmp26)
    tl.device_assert((0 <= tl.broadcast_to(tmp29, [XBLOCK])) &
        (tl.broadcast_to(tmp29, [XBLOCK]) < 4) | ~(tmp25 & xmask),
        'index out of bounds: 0 <= tl.broadcast_to(tmp29, [XBLOCK]) < 4')
tmp31 = tl.load(in_ptr1 + tl.broadcast_to(8 + tmp29, [XBLOCK]), tmp25 &
xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp0 >= tmp23
tl.full([1], 16, tl.int64)
tmp35 = tl.load(in_ptr0 + (12 + (-12 + x0)), tmp32 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp35 + tmp6
tmp37 = tmp35 < 0
tmp38 = tl.where(tmp37, tmp36, tmp35)
    tl.device_assert((0 <= tl.broadcast_to(tmp38, [XBLOCK])) &
        (tl.broadcast_to(tmp38, [XBLOCK]) < 4) | ~(tmp32 & xmask),
        'index out of bounds: 0 <= tl.broadcast_to(tmp38, [XBLOCK]) < 4')
tmp40 = tl.load(in_ptr1 + tl.broadcast_to(12 + tmp38, [XBLOCK]), tmp32 &
xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x0, tmp43, xmask)
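# Note on triton_poi_fused_cat_0 above: the four masked branches (guarded by
# tmp4, tmp15, tmp25, tmp32) each own one quarter of the flat output. Every
# branch loads an index from in_ptr0, wraps a possibly-negative index via
# tl.where, bounds-checks it with tl.device_assert, and gathers from in_ptr1
# at that row's base offset; the closing tl.where chain picks the branch that
# produced each output position.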
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16,), (1,), torch.int64)
get_raw_stream(0)
        triton_poi_fused_cat_0[grid(16)](arg1_1, arg0_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class OutputLayerNew(nn.Module):
def __init__(self, voxel_size=1.0):
super(OutputLayerNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mi-exwzd/Open3D-ML | OutputLayer | false | 16,044 | [
"MIT"
] | 447 | d58b24edd37de7889446360164cd5500e0bde060 | https://github.com/mi-exwzd/Open3D-ML/tree/d58b24edd37de7889446360164cd5500e0bde060 |
MatrixAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mf/cmfgepipblyia5ftdcd5amfjo2qliicwmbf5psnbmt4xd54uiilk.py
# Topologically Sorted Source Nodes: [mul, result], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# result => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %expand_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, result], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class SimilarityFunction(nn.Module):
"""
A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity
function on the vectors in the last dimension. For example, the tensors might both have shape
`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
tensor of shape `(batch_size, sentence_length)`.
The similarity function could be as simple as a dot product, or it could be a more complex,
parameterized function.
"""
default_implementation = 'dot_product'
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
"""
raise NotImplementedError
class DotProductSimilarity(SimilarityFunction):
"""
This similarity function simply computes the dot product between each pair of vectors, with an
optional scaling to reduce the variance of the output elements.
Parameters
----------
scale_output : ``bool``, optional
If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the
variance in the result.
"""
def __init__(self, scale_output: 'bool'=False) ->None:
super(DotProductSimilarity, self).__init__()
self._scale_output = scale_output
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
result = (tensor_1 * tensor_2).sum(dim=-1)
if self._scale_output:
result *= math.sqrt(tensor_1.size(-1))
return result
class MatrixAttention(nn.Module):
def __init__(self, similarity_function: 'SimilarityFunction'=None) ->None:
super().__init__()
self._similarity_function = (similarity_function or
DotProductSimilarity())
def forward(self, matrix_1: 'torch.Tensor', matrix_2: 'torch.Tensor'
) ->torch.Tensor:
tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0],
matrix_1.size()[1], matrix_2.size()[1], matrix_1.size()[2])
tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0],
matrix_1.size()[1], matrix_2.size()[1], matrix_2.size()[2])
return self._similarity_function(tiled_matrix_1, tiled_matrix_2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
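def _example_matrix_attention():
    # Hedged sketch added for clarity (not part of the original repo): with
    # the default DotProductSimilarity, MatrixAttention is equivalent to a
    # batched matrix product against the transposed second matrix.
    torch.manual_seed(0)
    attn = MatrixAttention()
    m1 = torch.rand(4, 4, 4)
    m2 = torch.rand(4, 4, 4)
    out = attn(m1, m2)  # shape (batch, len_1, len_2)
    ref = torch.bmm(m1, m2.transpose(1, 2))
    assert torch.allclose(out, ref, atol=1e-6)
    return out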
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
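# Note on triton_poi_fused_mul_sum_0 above: the embedding dimension is 4, so
# the reduction in (tensor_1 * tensor_2).sum(dim=-1) is fully unrolled into
# four multiply-adds. x3 = xindex // 4 walks (batch, len_1) rows of matrix_1,
# while 4 * x0 + 16 * x2 strides matrix_2 so its rows are read transposed,
# i.e. the kernel computes bmm(matrix_1, matrix_2.transpose(1, 2)).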
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SimilarityFunction(nn.Module):
"""
A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity
function on the vectors in the last dimension. For example, the tensors might both have shape
`(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
tensor of shape `(batch_size, sentence_length)`.
The similarity function could be as simple as a dot product, or it could be a more complex,
parameterized function.
"""
default_implementation = 'dot_product'
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
"""
raise NotImplementedError
class DotProductSimilarity(SimilarityFunction):
"""
This similarity function simply computes the dot product between each pair of vectors, with an
optional scaling to reduce the variance of the output elements.
Parameters
----------
scale_output : ``bool``, optional
If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the
variance in the result.
"""
def __init__(self, scale_output: 'bool'=False) ->None:
super(DotProductSimilarity, self).__init__()
self._scale_output = scale_output
def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor'
) ->torch.Tensor:
result = (tensor_1 * tensor_2).sum(dim=-1)
if self._scale_output:
result *= math.sqrt(tensor_1.size(-1))
return result
class MatrixAttentionNew(nn.Module):
def __init__(self, similarity_function: 'SimilarityFunction'=None) ->None:
super().__init__()
self._similarity_function = (similarity_function or
DotProductSimilarity())
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| michiyasunaga/GreaseLM | MatrixAttention | false | 16,046 | [
"MIT"
] | 76 | 596aa5047841e3e97730f621a2e4576772733df2 | https://github.com/michiyasunaga/GreaseLM/tree/596aa5047841e3e97730f621a2e4576772733df2 |
HardNegativeContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6b/c6bixyrttikkg53cqwagks4mvhumeai6tdkva5oiiuq4gbnhahnc.py
# Topologically Sorted Source Nodes: [diag_2, mul, scores_1, sort_1], Original ATen: [aten.diag_embed, aten.mul, aten.sub, aten.sort]
# Source node to ATen node mapping:
# diag_2 => eq, full_default, iota, where
# mul => mul
# scores_1 => sub
# sort_1 => sort_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute_1, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 2), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %mul), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, 1, True), kwargs = {})
triton_per_fused_diag_embed_mul_sort_sub_0 = async_compile.triton('triton_per_fused_diag_embed_mul_sort_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_diag_embed_mul_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_diag_embed_mul_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last')
tmp1 = r1
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp1.to(tl.int16)
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, tmp14, = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp9, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bs/cbsxmmi256ux5vrzlxdpdqr5wob5stbqcvv56dyd2e4mzqbamq5p.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => sort
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, 0, True), kwargs = {})
triton_per_fused_sort_1 = async_compile.triton('triton_per_fused_sort_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r1)), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (x0 + (4*r1)), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wk/cwkjmujyv2r6zzrwcajoo66qsgvmg3m4hwbrrzlhogm6ow5ra2cv.py
# Topologically Sorted Source Nodes: [add, clamp, neg_cap, add_1, clamp_1, neg_img, loss], Original ATen: [aten.add, aten.clamp, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# clamp => clamp_min
# clamp_1 => clamp_min_1
# loss => add_2
# neg_cap => sum_1
# neg_img => sum_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_1, %expand), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %expand_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_clamp_sum_2 = async_compile.triton('triton_per_fused_add_clamp_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (5*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp2 - tmp1
tmp4 = tmp0 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp3
tmp12 = triton_helpers.maximum(tmp11, tmp5)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = tmp9 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
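# Eager-mode restatement of the fused reduction above (an illustrative
# sketch, not emitted by Inductor; it assumes nmax == 1 as in the module
# this graph was traced from). The stride-5 loads in the kernel walk the
# diagonal of the row-major 4x4 score matrix.
def _fused_loss_reference(scores, margin=0.2):
    diag = scores.diag()
    masked = scores - 2 * torch.diag(diag)
    max_c = masked.max(dim=0).values  # hardest caption negatives (top row of the column sort)
    max_i = masked.max(dim=1).values  # hardest image negatives (first column of the row sort)
    return (torch.clamp(max_c + margin - diag, min=0).sum()
            + torch.clamp(max_i + margin - diag, min=0).sum())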
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [diag_2, mul, scores_1, sort_1], Original ATen: [aten.diag_embed, aten.mul, aten.sub, aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_diag_embed_mul_sort_sub_0.run(buf0, buf1, buf4, 4, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
triton_per_fused_sort_1.run(buf1, buf2, 4, 4, grid=grid(4), stream=stream0)
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [add, clamp, neg_cap, add_1, clamp_1, neg_img, loss], Original ATen: [aten.add, aten.clamp, aten.sum]
triton_per_fused_add_clamp_sum_2.run(buf8, buf2, buf0, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf2
del buf4
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HardNegativeContrastiveLoss(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super(HardNegativeContrastiveLoss, self).__init__()
self.margin = margin
self.nmax = nmax
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
        scores = scores - 2 * torch.diag(diag)
        sorted_cap, _ = torch.sort(scores, 0, descending=True)
        sorted_img, _ = torch.sort(scores, 1, descending=True)
        max_c = sorted_cap[:self.nmax, :]
        max_i = sorted_img[:, :self.nmax]
        neg_cap = torch.sum(torch.clamp(
            max_c + (self.margin - diag).view(1, -1).expand_as(max_c), min=0))
        neg_img = torch.sum(torch.clamp(
            max_i + (self.margin - diag).view(-1, 1).expand_as(max_i), min=0))
loss = neg_cap + neg_img
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
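# Minimal sanity check (illustrative, not part of the original module): with
# orthonormal embeddings every positive pair scores 1 and every negative 0,
# so max_c + (0.2 - 1) < 0 on both branches and the clamped loss is zero.
if __name__ == "__main__":
    _loss_fn = HardNegativeContrastiveLoss(nmax=1, margin=0.2)
    _embs = torch.eye(4)
    print(_loss_fn(_embs, _embs))  # expected: tensor(0.)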
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_diag_embed_mul_sort_sub_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp4 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp1 = r1
tmp2 = x0
tmp3 = tmp1 == tmp2
tmp5 = 0.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp1.to(tl.int16)
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp9, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
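# Note on the gather above (an illustrative reading): `in_ptr0 + 5 * r1`
# touches offsets 0, 5, 10, 15 of the row-major 4x4 score matrix, i.e. its
# diagonal, so tmp9 realises scores - 2 * diag_embed(scores.diag()) before
# the descending row-wise sort.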
@triton.jit
def triton_per_fused_sort_1(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r1), xmask, other=0.0)
tmp1 = r1
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (x0 + 4 * r1), tmp5, xmask)
@triton.jit
def triton_per_fused_add_clamp_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + 5 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = 0.2
tmp3 = tmp2 - tmp1
tmp4 = tmp0 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 + tmp3
tmp12 = triton_helpers.maximum(tmp11, tmp5)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = tmp9 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_diag_embed_mul_sort_sub_0[grid(4)](buf0, buf1,
buf4, 4, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sort_1[grid(4)](buf1, buf2, 4, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf8 = buf6
del buf6
triton_per_fused_add_clamp_sum_2[grid(1)](buf8, buf2, buf0, buf4, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf2
del buf4
return buf8,
class HardNegativeContrastiveLossNew(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super(HardNegativeContrastiveLossNew, self).__init__()
self.margin = margin
self.nmax = nmax
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| maxgreat/dsve-loc | HardNegativeContrastiveLoss | false | 16,047 | [
"BSD-3-Clause-Clear"
] | 56 | dd6807d02c0d5fd3e215be8e5c7a88e73102e561 | https://github.com/maxgreat/dsve-loc/tree/dd6807d02c0d5fd3e215be8e5c7a88e73102e561 |
GraphLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sj/csjponabvswzx52iypnd62a7oobcrz2pk4pqzcbvywkblpv4e5rw.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %unsqueeze_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
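# Note (illustrative): for the contiguous (4, 4, 4, 4) output with strides
# (64, 16, 4, 1), x1 = (xindex // 4) % 4 recovers the out_channels
# coordinate (dim 2), so each element adds the bias entry of its row --
# matching the b[None, :, None] broadcast in the eager module.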
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (0, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_2, (16, 4, 4), (16, 1, 4), 0), )
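# Note on the bmm above (an illustrative reading of the stride trick): the
# weight is reinterpreted with a batch stride of 0, so the same (4, 4) W is
# re-read for all 16 batch slices -- an implicit broadcast of W over the
# flattened batch, matching torch.matmul(self.W[None, :], x).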
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch._utils
class GraphLinear(torch.nn.Module):
"""
Generalization of 1x1 convolutions on Graphs
"""
def __init__(self, in_channels, out_channels):
super(GraphLinear, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.W = torch.nn.Parameter(torch.FloatTensor(out_channels,
in_channels))
self.b = torch.nn.Parameter(torch.FloatTensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
w_stdv = 1 / (self.in_channels * self.out_channels)
self.W.data.uniform_(-w_stdv, w_stdv)
self.b.data.uniform_(-w_stdv, w_stdv)
def forward(self, x):
return torch.matmul(self.W[None, :], x) + self.b[None, :, None]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
    extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (0, 4, 1),
        0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0),
        out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (16, 4, 4), (16, 1, 4), 0)
class GraphLinearNew(torch.nn.Module):
"""
Generalization of 1x1 convolutions on Graphs
"""
def __init__(self, in_channels, out_channels):
super(GraphLinearNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.W = torch.nn.Parameter(torch.FloatTensor(out_channels,
in_channels))
self.b = torch.nn.Parameter(torch.FloatTensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
w_stdv = 1 / (self.in_channels * self.out_channels)
self.W.data.uniform_(-w_stdv, w_stdv)
self.b.data.uniform_(-w_stdv, w_stdv)
def forward(self, input_0):
primals_1 = self.W
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| microsoft/MeshGraphormer | GraphLinear | false | 16,048 | [
"MIT"
] | 135 | 1c489e35e6bd3848ce0702891e4c8365b584bb8e | https://github.com/microsoft/MeshGraphormer/tree/1c489e35e6bd3848ce0702891e4c8365b584bb8e |
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uh/cuhkkj7wqrft54edm5qpc7wxakxuqpos25oen2oueas5hzuqxb4w.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qt/cqt5smk332l2kh4wfeyvsehh4o3hjuh44mkvw236faomj45hqkkt.py
# Topologically Sorted Source Nodes: [r, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# r => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fz/cfzbbgswnkfrf4fcn4bvz4dtyfvrzpx2ju7y4ydayloy7enc5zzw.py
# Topologically Sorted Source Nodes: [conv2d_3, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# relu_2 => relu_2
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_10, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hb/chbxhy4mgquiysf6mksuptrve7c6rj62ctu32lwt5h2zmf4vggl3.py
# Topologically Sorted Source Nodes: [r_2, relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# r_2 => convolution_4
# relu_3 => relu_3
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_11, %primals_12, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wy/cwysnia2ki3v72daiafewposkbcfeyetab7276hbcwf2sirf3xgb.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# interpolate => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_4 = async_compile.triton('triton_poi_fused__to_copy_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
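# Reference for the index math above (an illustrative sketch, matching the
# align_corners=False half-pixel rule of bilinear upsampling from 64 to
# 128): each destination index maps to source = (dst + 0.5) * 0.5 - 0.5,
# clamped at 0, and truncation yields the low gather index.
def _bilinear_low_index(dst, scale=0.5):
    return int(max((dst + 0.5) * scale - 0.5, 0.0))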
# kernel path: runs/run_shard_0/inductor_cache/my/cmydqesjxcxqlovj74s4uodm7ouywh7igtyrvqc6eehul6jjyatx.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# interpolate => add_3, clamp_max
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_3, 63), kwargs = {})
triton_poi_fused_add_clamp_5 = async_compile.triton('triton_poi_fused_add_clamp_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 63, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/n6/cn6pmjpeuhemuxylgmpnuzf6gbi7o7rclasxrgrnxlgzgmoonfxm.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# interpolate => add_2, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (128,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
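# Companion to the kernel above (illustrative): the lerp weight is the
# fractional part of the clamped source coordinate, itself clamped to
# [0, 1], so the leftmost output column degenerates to a plain copy of the
# low neighbour.
def _bilinear_weight(dst, scale=0.5):
    src = max((dst + 0.5) * scale - 0.5, 0.0)
    return min(max(src - float(int(src)), 0.0), 1.0)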
# kernel path: runs/run_shard_0/inductor_cache/ew/cew4qpn7kbwhjrwxhz4l4gm36ss4tc2kr5ybbjsvorpklkcm2ihm.py
# Topologically Sorted Source Nodes: [conv2d, r_1, m4, conv2d_3, r_3, s, interpolate, m, relu_4], Original ATen: [aten.convolution, aten.add, aten._unsafe_index, aten.sub, aten.mul, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# conv2d_3 => convolution_3
# interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_6, add_7, add_8, mul_2, mul_3, mul_4, sub_3, sub_4, sub_6
# m => add_9
# m4 => add
# r_1 => convolution_2
# r_3 => convolution_5
# relu_4 => relu_4
# s => add_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_2), kwargs = {})
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_10, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_13, %primals_14, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %convolution_5), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_7, %add_6), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_4), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_8), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_9,), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i64', 8: '*i64', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 128) % 128
x0 = xindex % 128
x6 = (xindex // 16384)
x2 = (xindex // 16384) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr9 + (x4), None)
tmp48 = tl.load(in_ptr10 + (x2), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr11 + (x4), None)
tmp51 = tl.load(in_ptr12 + (x2), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr13 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (64*tmp4) + (4096*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr4 + (tmp8 + (64*tmp4) + (4096*x6)), None, eviction_policy='evict_last')
tmp14 = tmp12 + tmp13
tmp15 = tmp11 + tmp14
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (64*tmp19) + (4096*x6)), None, eviction_policy='evict_last')
tmp21 = tmp20 + tmp10
tmp22 = tl.load(in_ptr4 + (tmp8 + (64*tmp19) + (4096*x6)), None, eviction_policy='evict_last')
tmp23 = tmp22 + tmp13
tmp24 = tmp21 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (64*tmp19) + (4096*x6)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = tl.load(in_ptr4 + (tmp28 + (64*tmp19) + (4096*x6)), None, eviction_policy='evict_last')
tmp32 = tmp31 + tmp13
tmp33 = tmp30 + tmp32
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (64*tmp4) + (4096*x6)), None, eviction_policy='evict_last')
tmp39 = tmp38 + tmp10
tmp40 = tl.load(in_ptr4 + (tmp28 + (64*tmp4) + (4096*x6)), None, eviction_policy='evict_last')
tmp41 = tmp40 + tmp13
tmp42 = tmp39 + tmp41
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp49 = tmp47 + tmp48
tmp52 = tmp50 + tmp51
tmp53 = tmp49 + tmp52
tmp55 = tmp46 * tmp54
tmp56 = tmp37 + tmp55
tmp57 = tmp53 + tmp56
tmp58 = tl.full([1], 0, tl.int32)
tmp59 = triton_helpers.maximum(tmp58, tmp57)
tl.store(in_out_ptr1 + (x4), tmp57, None)
tl.store(out_ptr0 + (x4), tmp59, None)
''', device_str='cuda')
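# Note (an illustrative reading of the fused kernel above): it gathers the
# four bilinear neighbours of the low-resolution skip map (conv2d + r_1),
# lerps first horizontally and then vertically, adds the full-resolution
# branch (conv2d_3 + r_3), and applies the trailing ReLU -- the whole
# `s + interpolate(m4)` refinement step in a single pass.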
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6kcb5uvfx66y6gfesn5nwng6gdwsqy3jmevez7uzli4xrduons.py
# Topologically Sorted Source Nodes: [conv2d_8, relu_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# relu_6 => relu_6
# Graph fragment:
# %convolution_8 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_21, %primals_19, %primals_20, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gd/cgdcnncnurkhyuf47ctxmmswy6ase7bnkzxxjixfhneixtdq2zg6.py
# Topologically Sorted Source Nodes: [r_6, relu_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# r_6 => convolution_9
# relu_7 => relu_7
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iw/ciwrydberbrp3z4oeymh7r7sihrxus63pfs3f5yubjb3h4hknmoj.py
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# interpolate_1 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_10 = async_compile.triton('triton_poi_fused__to_copy_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kn/ckndooajco6otrc6td2eecyvxpbnnzr5p32qloyaxdb5nc36cqit.py
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# interpolate_1 => add_13, clamp_max_4
# Graph fragment:
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_13, 127), kwargs = {})
triton_poi_fused_add_clamp_11 = async_compile.triton('triton_poi_fused_add_clamp_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 127, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qn/cqnfg34e4uu2q2aam2izdlz4va2hrpbwxoaq2qlx6uho3ggt7m2g.py
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# interpolate_1 => add_12, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_5, sub_7, sub_9
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (256,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, 0.5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
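# This companion kernel produces the matching interpolation weight: the
# fractional distance of the source coordinate past its lower index, clamped
# to [0, 1]. A hypothetical reference implementation (illustration only):
def ref_bilinear_weight(x, inv_scale=0.5):
    src = max((x + 0.5) * inv_scale - 0.5, 0.0)
    frac = src - int(src)            # tmp10: offset past the lower index
    return min(max(frac, 0.0), 1.0)  # tmp11..tmp13: clamp into [0, 1]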
# kernel path: runs/run_shard_0/inductor_cache/3m/c3myfph35zro4x2xjrcbcp5nf2mtol4jausc4nbd423qmt2dxe3o.py
# Topologically Sorted Source Nodes: [r_5, m_1, conv2d_8, r_7, s_1, interpolate_1, m_2, relu_8], Original ATen: [aten.convolution, aten.add, aten._unsafe_index, aten.sub, aten.mul, aten.relu]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# interpolate_1 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_16, add_17, add_18, mul_7, mul_8, mul_9, sub_10, sub_11, sub_13
# m_1 => add_10
# m_2 => add_19
# r_5 => convolution_7
# r_7 => convolution_10
# relu_8 => relu_8
# s_1 => add_11
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_17, %primals_18, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_10 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %convolution_7), kwargs = {})
# %convolution_8 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_21, %primals_19, %primals_20, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_8, %convolution_10), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_10, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_10, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_10, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_10, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_17, %add_16), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %mul_9), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %add_18), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_19,), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13', 'mutated_arg_names': ['in_out_ptr2'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13(in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x6 = (xindex // 65536)
x2 = (xindex // 65536) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp44 = tl.load(in_out_ptr2 + (x5), None)
tmp45 = tl.load(in_ptr9 + (x2), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr10 + (x5), None)
tmp48 = tl.load(in_ptr11 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (128*tmp4) + (16384*x6)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (tmp8 + (128*tmp4) + (16384*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = tmp9 + tmp12
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp8 + (128*tmp17) + (16384*x6)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr3 + (tmp8 + (128*tmp17) + (16384*x6)), None, eviction_policy='evict_last')
tmp20 = tmp19 + tmp11
tmp21 = tmp18 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp25 + (128*tmp17) + (16384*x6)), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (tmp25 + (128*tmp17) + (16384*x6)), None, eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tmp26 + tmp28
tmp30 = tmp29 - tmp21
tmp32 = tmp30 * tmp31
tmp33 = tmp21 + tmp32
tmp34 = tl.load(in_ptr2 + (tmp25 + (128*tmp4) + (16384*x6)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (tmp25 + (128*tmp4) + (16384*x6)), None, eviction_policy='evict_last')
tmp36 = tmp35 + tmp11
tmp37 = tmp34 + tmp36
tmp38 = tmp37 - tmp13
tmp39 = tmp38 * tmp31
tmp40 = tmp13 + tmp39
tmp41 = tmp40 - tmp33
tmp43 = tmp41 * tmp42
tmp46 = tmp44 + tmp45
tmp49 = tmp47 + tmp48
tmp50 = tmp46 + tmp49
tmp51 = tmp33 + tmp43
tmp52 = tmp50 + tmp51
tmp53 = tl.full([1], 0, tl.int32)
tmp54 = triton_helpers.maximum(tmp53, tmp52)
tl.store(in_out_ptr2 + (x5), tmp52, None)
tl.store(out_ptr0 + (x5), tmp54, None)
''', device_str='cuda')
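# Kernel 13 fuses the tail of the 128->256 Refine stage: the residual adds
# that finish m_1 and s_1, the x2 bilinear upsample of m_1, the sum
# m_2 = s_1 + interpolate(m_1), and the ReLU that the following ResBlock
# applies first. An eager-mode sketch of the same dataflow, assuming s_1 and
# m_1 are the already-biased convolution results (hypothetical helper,
# illustration only):
def ref_refine_tail(s_1, m_1):
    import torch.nn.functional as F
    m_2 = s_1 + F.interpolate(m_1, scale_factor=2, mode='bilinear',
                              align_corners=False)
    return m_2, F.relu(m_2)  # kernel stores both: in_out_ptr2 and out_ptr0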
# kernel path: runs/run_shard_0/inductor_cache/zr/czrccmychacfvky2exd5lzmoc5pypubyn2hzwufupabcmxvewrgb.py
# Topologically Sorted Source Nodes: [r_9, m_3, relu_10], Original ATen: [aten.convolution, aten.add, aten.relu]
# Source node to ATen node mapping:
# m_3 => add_20
# r_9 => convolution_12
# relu_10 => relu_10
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %convolution_12), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_20,), kwargs = {})
triton_poi_fused_add_convolution_relu_14 = async_compile.triton('triton_poi_fused_add_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_14(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x3), None)
tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(in_out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/co/ccoliwknd22pphpuu6shiuvj6s4smtop3ksipexkrvrcn4netzjg.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# p => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_15 = async_compile.triton('triton_poi_fused__to_copy_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c7/cc7u7s7ljkbxzbeqq6iiulrwcq7h5jaulun55l5n32lookp7oifr.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# p => add_22, clamp_max_8
# Graph fragment:
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_22, 255), kwargs = {})
triton_poi_fused_add_clamp_16 = async_compile.triton('triton_poi_fused_add_clamp_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 255, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y7/cy7qwzrtvnpoewtkdebidv4ycbnlqcppsleyf4fvts6tacim4tca.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# p => add_21, clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_10, sub_14, sub_16
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (1024,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.5), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.25), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_10, 0.5), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0.0), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oe/coeeca6pehecziah5lu474kgesugghmzk4g77ag33vzs5qyqbojl.py
# Topologically Sorted Source Nodes: [p2, p], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# p => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_25, add_26, add_27, mul_12, mul_13, mul_14, sub_17, sub_18, sub_20
# p2 => convolution_13
# Graph fragment:
# %convolution_13 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_13, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_10), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %clamp_max_10), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %clamp_max_11), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_14), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_18 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 1024
x0 = xindex % 1024
x6 = (xindex // 1048576)
x2 = (xindex // 1048576) % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 256, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (256*tmp4) + (65536*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (256*tmp4) + (65536*x6)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (256*tmp25) + (65536*x6)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (256*tmp25) + (65536*x6)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tl.store(in_out_ptr0 + (x4), tmp36, None)
''', device_str='cuda')
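# Kernel 18 is the compiled form of the decoder's last step,
# p = F.interpolate(p2, scale_factor=4, mode='bilinear', align_corners=False),
# taking the fused 2-channel 256x256 prediction to 1024x1024. Eager sketch
# (hypothetical helper, illustration only):
def ref_final_upsample(p2):
    import torch.nn.functional as F
    return F.interpolate(p2, scale_factor=4, mode='bilinear',
                         align_corners=False)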
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 512, 128, 128), (8388608, 16384, 128, 1))
assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, 256, 256, 256), (16777216, 65536, 256, 1))
assert_size_stride(primals_22, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_25, (4, ), (1, ))
assert_size_stride(primals_26, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_27, (4, ), (1, ))
assert_size_stride(primals_28, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_29, (4, ), (1, ))
assert_size_stride(primals_30, (2, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_31, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [r, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [r_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf6 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_9, buf6, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [r_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [r_2, relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf8, primals_12, 262144, grid=grid(262144), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [r_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf10 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_4.run(buf10, 128, grid=grid(128), stream=stream0)
buf11 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_5.run(buf11, 128, grid=grid(128), stream=stream0)
buf12 = empty_strided_cuda((128, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_4.run(buf12, 128, grid=grid(128), stream=stream0)
buf13 = empty_strided_cuda((128, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_5.run(buf13, 128, grid=grid(128), stream=stream0)
buf16 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6.run(buf16, 128, grid=grid(128), stream=stream0)
buf18 = empty_strided_cuda((128, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6.run(buf18, 128, grid=grid(128), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1), torch.float32)
buf19 = buf15; del buf15 # reuse
buf20 = buf19; del buf19 # reuse
buf21 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, r_1, m4, conv2d_3, r_3, s, interpolate, m, relu_4], Original ATen: [aten.convolution, aten.add, aten._unsafe_index, aten.sub, aten.mul, aten.relu]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7.run(buf20, buf11, buf12, buf0, primals_2, buf4, primals_7, buf10, buf13, buf16, buf5, primals_9, buf9, primals_14, buf18, buf21, 262144, grid=grid(262144), stream=stream0)
del buf0
del buf4
del buf5
del buf9
del primals_14
del primals_2
del primals_7
del primals_9
# Topologically Sorted Source Nodes: [r_4], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [r_4, relu_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf23, primals_16, 262144, grid=grid(262144), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [r_5], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 128, 128), (65536, 16384, 128, 1))
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(primals_21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf26 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_8, relu_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf25, primals_20, buf26, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [r_6], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf28 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [r_6, relu_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf28, primals_23, 1048576, grid=grid(1048576), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [r_7], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf28, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf30 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_10.run(buf30, 256, grid=grid(256), stream=stream0)
buf31 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_11.run(buf31, 256, grid=grid(256), stream=stream0)
buf32 = empty_strided_cuda((256, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_10.run(buf32, 256, grid=grid(256), stream=stream0)
buf33 = empty_strided_cuda((256, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_11.run(buf33, 256, grid=grid(256), stream=stream0)
buf36 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12.run(buf36, 256, grid=grid(256), stream=stream0)
buf38 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12.run(buf38, 256, grid=grid(256), stream=stream0)
buf40 = buf25; del buf25 # reuse
buf41 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [r_5, m_1, conv2d_8, r_7, s_1, interpolate_1, m_2, relu_8], Original ATen: [aten.convolution, aten.add, aten._unsafe_index, aten.sub, aten.mul, aten.relu]
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13.run(buf40, buf31, buf32, buf20, buf24, primals_18, buf30, buf33, buf36, buf38, primals_20, buf29, primals_25, buf41, 1048576, grid=grid(1048576), stream=stream0)
del buf20
del buf24
del buf29
del primals_18
del primals_20
del primals_25
# Topologically Sorted Source Nodes: [r_8], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [r_8, relu_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf43, primals_27, 1048576, grid=grid(1048576), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [r_9], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf45 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [r_9, m_3, relu_10], Original ATen: [aten.convolution, aten.add, aten.relu]
triton_poi_fused_add_convolution_relu_14.run(buf45, buf44, primals_29, 1048576, grid=grid(1048576), stream=stream0)
del buf44
del primals_29
# Topologically Sorted Source Nodes: [p2], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 2, 256, 256), (131072, 65536, 256, 1))
buf47 = empty_strided_cuda((1024, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_15.run(buf47, 1024, grid=grid(1024), stream=stream0)
buf48 = empty_strided_cuda((1024, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_16.run(buf48, 1024, grid=grid(1024), stream=stream0)
buf49 = empty_strided_cuda((1024, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_15.run(buf49, 1024, grid=grid(1024), stream=stream0)
buf50 = empty_strided_cuda((1024, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_16.run(buf50, 1024, grid=grid(1024), stream=stream0)
buf51 = empty_strided_cuda((1024, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17.run(buf51, 1024, grid=grid(1024), stream=stream0)
buf53 = empty_strided_cuda((1024, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17.run(buf53, 1024, grid=grid(1024), stream=stream0)
buf54 = empty_strided_cuda((4, 2, 1024, 1024), (2097152, 1048576, 1024, 1), torch.float32)
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [p2, p], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_18.run(buf55, buf47, buf49, buf46, primals_31, buf50, buf51, buf48, buf53, 8388608, grid=grid(8388608), stream=stream0)
del buf46
del primals_31
return (buf55, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_11, primals_13, primals_15, primals_17, primals_19, primals_21, primals_22, primals_24, primals_26, primals_28, primals_30, buf1, buf3, buf6, buf8, buf10, buf11, buf12, buf13, buf16, buf18, buf21, buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf38, buf41, buf43, buf45, buf47, buf48, buf49, buf50, buf51, buf53, )
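# Note: the first returned tensor, buf55, is the forward output -- the
# (4, 2, 1024, 1024) upsampled prediction. The remaining weights, inputs,
# and intermediates appear to be kept alive for reuse by the autograd
# backward graph rather than being recomputed.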
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 512, 128, 128), (8388608, 16384, 128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, 256, 256, 256), (16777216, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((2, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data.dataset
class ResBlock(torch.nn.Module):
def __init__(self, indim, outdim=None, stride=1):
super(ResBlock, self).__init__()
if outdim is None:
outdim = indim
if indim == outdim and stride == 1:
self.downsample = None
else:
self.downsample = torch.nn.Conv2d(indim, outdim, kernel_size=3,
padding=1, stride=stride)
        self.conv1 = torch.nn.Conv2d(indim, outdim, kernel_size=3,
            padding=1, stride=stride)
self.conv2 = torch.nn.Conv2d(outdim, outdim, kernel_size=3, padding=1)
def forward(self, x):
r = self.conv1(F.relu(x))
r = self.conv2(F.relu(r))
if self.downsample is not None:
x = self.downsample(x)
return x + r
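# ResBlock is a pre-activation residual block: both convolutions see ReLU-ed
# inputs, and the (optionally downsampled) identity is added back. A minimal
# usage sketch (shapes chosen for illustration):
#
#     block = ResBlock(4)                  # indim == outdim, stride == 1
#     y = block(torch.rand(1, 4, 64, 64))  # y.shape == (1, 4, 64, 64)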
class Refine(torch.nn.Module):
def __init__(self, inplanes, planes, scale_factor=2):
super(Refine, self).__init__()
self.convFS = torch.nn.Conv2d(inplanes, planes, kernel_size=3,
padding=1, stride=1)
self.ResFS = ResBlock(planes, planes)
self.ResMM = ResBlock(planes, planes)
self.scale_factor = scale_factor
def forward(self, f, pm):
s = self.ResFS(self.convFS(f))
        m = s + F.interpolate(pm, scale_factor=self.scale_factor,
            mode='bilinear', align_corners=False)
m = self.ResMM(m)
return m
class Decoder(torch.nn.Module):
def __init__(self, mdim):
super(Decoder, self).__init__()
self.convFM = torch.nn.Conv2d(1024, mdim, kernel_size=3, padding=1,
stride=1)
self.ResMM = ResBlock(mdim, mdim)
self.RF3 = Refine(512, mdim)
self.RF2 = Refine(256, mdim)
self.pred2 = torch.nn.Conv2d(mdim, 2, kernel_size=3, padding=1,
stride=1)
def forward(self, r4, r3, r2):
m4 = self.ResMM(self.convFM(r4))
m3 = self.RF3(r3, m4)
m2 = self.RF2(r2, m3)
p2 = self.pred2(F.relu(m2))
p = F.interpolate(p2, scale_factor=4, mode='bilinear',
align_corners=False)
return p
def get_inputs():
return [torch.rand([4, 1024, 64, 64]), torch.rand([4, 512, 128, 128]),
torch.rand([4, 256, 256, 256])]
def get_init_inputs():
return [[], {'mdim': 4}]
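# Putting it together, the eager model behind the compiled graph above can be
# exercised directly (sketch; mirrors get_init_inputs/get_inputs, and the
# output shape matches buf55 in the compiled call):
#
#     model = Decoder(mdim=4)
#     p = model(*get_inputs())  # p.shape == (4, 2, 1024, 1024)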
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.nn.functional as F
import torch.utils.data.dataset
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 63, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7(in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 128 % 128
x0 = xindex % 128
x6 = xindex // 16384
x2 = xindex // 16384 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr9 + x4, None)
tmp48 = tl.load(in_ptr10 + x2, None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr11 + x4, None)
tmp51 = tl.load(in_ptr12 + x2, None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr13 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr4 + (tmp8 + 64 * tmp4 + 4096 * x6), None,
eviction_policy='evict_last')
tmp14 = tmp12 + tmp13
tmp15 = tmp11 + tmp14
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 64 * tmp19 + 4096 * x6), None,
eviction_policy='evict_last')
tmp21 = tmp20 + tmp10
tmp22 = tl.load(in_ptr4 + (tmp8 + 64 * tmp19 + 4096 * x6), None,
eviction_policy='evict_last')
tmp23 = tmp22 + tmp13
tmp24 = tmp21 + tmp23
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 64 * tmp19 + 4096 * x6), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp10
tmp31 = tl.load(in_ptr4 + (tmp28 + 64 * tmp19 + 4096 * x6), None,
eviction_policy='evict_last')
tmp32 = tmp31 + tmp13
tmp33 = tmp30 + tmp32
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 64 * tmp4 + 4096 * x6), None,
eviction_policy='evict_last')
tmp39 = tmp38 + tmp10
tmp40 = tl.load(in_ptr4 + (tmp28 + 64 * tmp4 + 4096 * x6), None,
eviction_policy='evict_last')
tmp41 = tmp40 + tmp13
tmp42 = tmp39 + tmp41
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp49 = tmp47 + tmp48
tmp52 = tmp50 + tmp51
tmp53 = tmp49 + tmp52
tmp55 = tmp46 * tmp54
tmp56 = tmp37 + tmp55
tmp57 = tmp53 + tmp56
tmp58 = tl.full([1], 0, tl.int32)
tmp59 = triton_helpers.maximum(tmp58, tmp57)
tl.store(in_out_ptr1 + x4, tmp57, None)
tl.store(out_ptr0 + x4, tmp59, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x3, tmp4, None)
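# Kernels _8 above and _9 below are the pointwise epilogue of relu(conv(x)):
# extern_kernels.convolution runs without bias, and these kernels add the
# per-channel bias and apply ReLU (_8 writes a fresh buffer, _9 updates in
# place through in_out_ptr0). Hedged eager equivalent:
def _bias_relu_reference(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))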
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 127, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13(in_out_ptr2,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x6 = xindex // 65536
x2 = xindex // 65536 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp44 = tl.load(in_out_ptr2 + x5, None)
tmp45 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr10 + x5, None)
tmp48 = tl.load(in_ptr11 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 128 * tmp4 + 16384 * x6), None,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (tmp8 + 128 * tmp4 + 16384 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = tmp9 + tmp12
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tmp18 = tl.load(in_ptr2 + (tmp8 + 128 * tmp17 + 16384 * x6), None,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr3 + (tmp8 + 128 * tmp17 + 16384 * x6), None,
eviction_policy='evict_last')
tmp20 = tmp19 + tmp11
tmp21 = tmp18 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp25 + 128 * tmp17 + 16384 * x6), None,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (tmp25 + 128 * tmp17 + 16384 * x6), None,
eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tmp26 + tmp28
tmp30 = tmp29 - tmp21
tmp32 = tmp30 * tmp31
tmp33 = tmp21 + tmp32
tmp34 = tl.load(in_ptr2 + (tmp25 + 128 * tmp4 + 16384 * x6), None,
eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (tmp25 + 128 * tmp4 + 16384 * x6), None,
eviction_policy='evict_last')
tmp36 = tmp35 + tmp11
tmp37 = tmp34 + tmp36
tmp38 = tmp37 - tmp13
tmp39 = tmp38 * tmp31
tmp40 = tmp13 + tmp39
tmp41 = tmp40 - tmp33
tmp43 = tmp41 * tmp42
tmp46 = tmp44 + tmp45
tmp49 = tmp47 + tmp48
tmp50 = tmp46 + tmp49
tmp51 = tmp33 + tmp43
tmp52 = tmp50 + tmp51
tmp53 = tl.full([1], 0, tl.int32)
tmp54 = triton_helpers.maximum(tmp53, tmp52)
tl.store(in_out_ptr2 + x5, tmp52, None)
tl.store(out_ptr0 + x5, tmp54, None)
@triton.jit
def triton_poi_fused_add_convolution_relu_14(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(in_out_ptr0 + x3, tmp6, None)
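# Kernel _14 fuses the tail of the second Refine block with the activation
# feeding pred2: out = relu(x + conv_out + bias), i.e. the ResBlock skip
# connection x + r followed by F.relu. Hedged sketch:
def _resblock_tail_reference(x, conv_out, bias):
    return torch.relu(x + conv_out + bias.view(1, -1, 1, 1))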
@triton.jit
def triton_poi_fused__to_copy_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_16(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 255, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_18(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 1024
x0 = xindex % 1024
x6 = xindex // 1048576
x2 = xindex // 1048576 % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 256, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 256 * tmp4 + 65536 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 256 * tmp4 + 65536 * x6), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 256 * tmp25 + 65536 * x6), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 256 * tmp25 + 65536 * x6), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tl.store(in_out_ptr0 + x4, tmp36, None)
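# Kernel _18 adds the pred2 bias and bilinearly upsamples the 2-channel
# logits from 256x256 to 1024x1024 (the 0.25 factor in kernels _15 to _17 is
# 1/scale for scale_factor=4, align_corners=False). Hedged sketch:
def _final_upsample_reference(logits, bias):
    return torch.nn.functional.interpolate(
        logits + bias.view(1, -1, 1, 1), scale_factor=4, mode='bilinear',
        align_corners=False)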
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 512, 128, 128), (8388608, 16384, 128, 1))
assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 256, 256, 256), (16777216, 65536,
256, 1))
assert_size_stride(primals_22, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_27, (4,), (1,))
assert_size_stride(primals_28, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (2, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_31, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(65536)](buf0, primals_2,
buf1, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf5 = extern_kernels.convolution(primals_10, primals_8, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf6 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(262144)](buf5, primals_9,
buf6, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_3[grid(262144)](buf8, primals_12,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_12
buf9 = extern_kernels.convolution(buf8, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf10 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_4[grid(128)](buf10, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((128, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_5[grid(128)](buf11, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((128,), (1,), torch.int64)
triton_poi_fused__to_copy_4[grid(128)](buf12, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((128,), (1,), torch.int64)
triton_poi_fused_add_clamp_5[grid(128)](buf13, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6[grid(128)](buf16,
128, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((128, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_6[grid(128)](buf18,
128, XBLOCK=128, num_warps=4, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1),
torch.float32)
buf19 = buf15
del buf15
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4, 128, 128), (65536, 16384, 128, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_7[grid(
262144)](buf20, buf11, buf12, buf0, primals_2, buf4, primals_7,
buf10, buf13, buf16, buf5, primals_9, buf9, primals_14, buf18,
buf21, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del buf4
del buf5
del buf9
del primals_14
del primals_2
del primals_7
del primals_9
buf22 = extern_kernels.convolution(buf21, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_3[grid(262144)](buf23, primals_16,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_16
buf24 = extern_kernels.convolution(buf23, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 128, 128), (65536, 16384, 128, 1))
buf25 = extern_kernels.convolution(primals_21, primals_19, stride=(
1, 1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 4, 256, 256), (262144, 65536, 256, 1))
        buf26 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1),
            torch.float32)
triton_poi_fused_convolution_relu_8[grid(1048576)](buf25,
primals_20, buf26, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf28 = buf27
del buf27
triton_poi_fused_convolution_relu_9[grid(1048576)](buf28,
primals_23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf29 = extern_kernels.convolution(buf28, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf30 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_10[grid(256)](buf30, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf31 = empty_strided_cuda((256, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_11[grid(256)](buf31, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf32 = empty_strided_cuda((256,), (1,), torch.int64)
triton_poi_fused__to_copy_10[grid(256)](buf32, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf33 = empty_strided_cuda((256,), (1,), torch.int64)
triton_poi_fused_add_clamp_11[grid(256)](buf33, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf36 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(256)](buf36,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf38 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(256)](buf38,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf40 = buf25
del buf25
        buf41 = empty_strided_cuda((4, 4, 256, 256), (262144, 65536, 256, 1),
            torch.float32)
triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_13[grid
(1048576)](buf40, buf31, buf32, buf20, buf24, primals_18, buf30,
buf33, buf36, buf38, primals_20, buf29, primals_25, buf41,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del buf20
del buf24
del buf29
del primals_18
del primals_20
del primals_25
buf42 = extern_kernels.convolution(buf41, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_9[grid(1048576)](buf43,
primals_27, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf44 = extern_kernels.convolution(buf43, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 4, 256, 256), (262144, 65536, 256, 1))
buf45 = buf40
del buf40
triton_poi_fused_add_convolution_relu_14[grid(1048576)](buf45,
buf44, primals_29, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del buf44
del primals_29
buf46 = extern_kernels.convolution(buf45, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 2, 256, 256), (131072, 65536, 256, 1))
buf47 = empty_strided_cuda((1024, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_15[grid(1024)](buf47, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf48 = empty_strided_cuda((1024, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_16[grid(1024)](buf48, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf49 = empty_strided_cuda((1024,), (1,), torch.int64)
triton_poi_fused__to_copy_15[grid(1024)](buf49, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf50 = empty_strided_cuda((1024,), (1,), torch.int64)
triton_poi_fused_add_clamp_16[grid(1024)](buf50, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf51 = empty_strided_cuda((1024,), (1,), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17[grid(1024)](
            buf51, 1024, XBLOCK=256, num_warps=4, num_stages=1)
buf53 = empty_strided_cuda((1024, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_17[grid(1024)](
            buf53, 1024, XBLOCK=256, num_warps=4, num_stages=1)
buf54 = empty_strided_cuda((4, 2, 1024, 1024), (2097152, 1048576,
1024, 1), torch.float32)
buf55 = buf54
del buf54
triton_poi_fused__unsafe_index_add_convolution_mul_sub_18[grid(8388608)
](buf55, buf47, buf49, buf46, primals_31, buf50, buf51, buf48,
buf53, 8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del buf46
del primals_31
return (buf55, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_11, primals_13, primals_15, primals_17,
primals_19, primals_21, primals_22, primals_24, primals_26,
primals_28, primals_30, buf1, buf3, buf6, buf8, buf10, buf11, buf12,
buf13, buf16, buf18, buf21, buf23, buf26, buf28, buf30, buf31,
buf32, buf33, buf36, buf38, buf41, buf43, buf45, buf47, buf48,
buf49, buf50, buf51, buf53)
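# call() mirrors the decoder's forward pass: three lateral features
# (64/128/256 px) are refined coarse-to-fine, with convolutions dispatched to
# extern_kernels.convolution and all pointwise work fused into the Triton
# kernels above; the returned tuple is (output, *tensors saved for backward).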
import torch.nn.functional as F
class ResBlock(torch.nn.Module):
def __init__(self, indim, outdim=None, stride=1):
super(ResBlock, self).__init__()
if outdim is None:
outdim = indim
if indim == outdim and stride == 1:
self.downsample = None
else:
self.downsample = torch.nn.Conv2d(indim, outdim, kernel_size=3,
padding=1, stride=stride)
self.conv1 = torch.nn.Conv2d(indim, outdim, kernel_size=3, padding=
1, stride=stride)
self.conv2 = torch.nn.Conv2d(outdim, outdim, kernel_size=3, padding=1)
def forward(self, x):
r = self.conv1(F.relu(x))
r = self.conv2(F.relu(r))
if self.downsample is not None:
x = self.downsample(x)
return x + r
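def _resblock_smoke_test():
    # Hedged usage sketch, not part of the original repo; shapes are
    # illustrative. The block is pre-activation:
    # r = conv2(relu(conv1(relu(x)))), with an optional downsample projection.
    block = ResBlock(4, 4)
    y = block(torch.randn(1, 4, 8, 8))
    assert y.shape == (1, 4, 8, 8)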
class Refine(torch.nn.Module):
def __init__(self, inplanes, planes, scale_factor=2):
super(Refine, self).__init__()
self.convFS = torch.nn.Conv2d(inplanes, planes, kernel_size=3,
padding=1, stride=1)
self.ResFS = ResBlock(planes, planes)
self.ResMM = ResBlock(planes, planes)
self.scale_factor = scale_factor
def forward(self, f, pm):
s = self.ResFS(self.convFS(f))
m = s + F.interpolate(pm, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
m = self.ResMM(m)
return m
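def _refine_smoke_test():
    # Hedged usage sketch: Refine upsamples pm by scale_factor and merges it
    # with the lateral feature f through convFS/ResFS/ResMM. Shapes are
    # illustrative.
    refine = Refine(512, 4)
    m = refine(torch.randn(1, 512, 16, 16), torch.randn(1, 4, 8, 8))
    assert m.shape == (1, 4, 16, 16)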
class DecoderNew(torch.nn.Module):
def __init__(self, mdim):
super(DecoderNew, self).__init__()
self.convFM = torch.nn.Conv2d(1024, mdim, kernel_size=3, padding=1,
stride=1)
self.ResMM = ResBlock(mdim, mdim)
self.RF3 = Refine(512, mdim)
self.RF2 = Refine(256, mdim)
self.pred2 = torch.nn.Conv2d(mdim, 2, kernel_size=3, padding=1,
stride=1)
def forward(self, input_0, input_1, input_2):
primals_1 = self.convFM.weight
primals_2 = self.convFM.bias
primals_4 = self.ResMM.conv1.weight
primals_5 = self.ResMM.conv1.bias
primals_6 = self.ResMM.conv2.weight
primals_7 = self.ResMM.conv2.bias
primals_8 = self.RF3.convFS.weight
primals_9 = self.RF3.convFS.bias
primals_11 = self.RF3.ResFS.conv1.weight
primals_12 = self.RF3.ResFS.conv1.bias
primals_13 = self.RF3.ResFS.conv2.weight
primals_14 = self.RF3.ResFS.conv2.bias
primals_15 = self.RF3.ResMM.conv1.weight
primals_16 = self.RF3.ResMM.conv1.bias
primals_17 = self.RF3.ResMM.conv2.weight
primals_18 = self.RF3.ResMM.conv2.bias
primals_19 = self.RF2.convFS.weight
primals_20 = self.RF2.convFS.bias
primals_22 = self.RF2.ResFS.conv1.weight
primals_23 = self.RF2.ResFS.conv1.bias
primals_24 = self.RF2.ResFS.conv2.weight
primals_25 = self.RF2.ResFS.conv2.bias
primals_26 = self.RF2.ResMM.conv1.weight
primals_27 = self.RF2.ResMM.conv1.bias
primals_28 = self.RF2.ResMM.conv2.weight
primals_29 = self.RF2.ResMM.conv2.bias
primals_30 = self.pred2.weight
primals_31 = self.pred2.bias
primals_3 = input_0
primals_10 = input_1
primals_21 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
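def _decoder_smoke_test():
    # Hedged usage sketch matching the shapes asserted in call(); requires a
    # CUDA device since call() launches the compiled Triton kernels.
    dec = DecoderNew(4).cuda()
    out = dec(torch.randn(4, 1024, 64, 64, device='cuda'),
              torch.randn(4, 512, 128, 128, device='cuda'),
              torch.randn(4, 256, 256, 256, device='cuda'))
    assert out.shape == (4, 2, 1024, 1024)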
| hzxie/RMNet | Decoder | false | 16,049 | ["MIT"] | 66 | 32a16f9c9473463a41dd6e95f72b06dd830fc1eb | https://github.com/hzxie/RMNet/tree/32a16f9c9473463a41dd6e95f72b06dd830fc1eb |
SwaVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3g/c3gd5ipjfbufzu6hvujxufg6z3emufd62cvcqjyy3muqew2xvzbd.py
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
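# The kernels below implement the Sinkhorn-Knopp normalisation used by SwAV:
# Q = exp(out.T / eps) with eps = 0.05 (the literal 20.0 is 1/eps), a global
# normalisation by sum_Q, then alternating row/column rescaling by 1/K and
# 1/B (both 4 here, hence the 0.25 literals), and a final Q *= B. A hedged
# eager-mode sketch of the same computation, unrolled three times like the
# fused kernels:
@torch.no_grad()
def _sinkhorn_reference(out, eps=0.05, iters=3):
    Q = torch.exp(out / eps).t()          # K x B
    Q /= Q.sum()
    K, B = Q.shape
    for _ in range(iters):
        Q /= Q.sum(dim=1, keepdim=True)   # rows: prototypes
        Q /= K
        Q /= Q.sum(dim=0, keepdim=True)   # columns: samples
        Q /= B
    return (Q * B).t()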
# kernel path: runs/run_shard_0/inductor_cache/pb/cpb5rsib7fg2llnv7dg2enzvdhrqqrlufmxvryadr6zckhtmyysm.py
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_2, [1], True), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ay/cayipr5tzejzfvzl7ba3w7cqhf2axor2ag2cqam22ambv3zsqfqn.py
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_3 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_6, [0], True), kwargs = {})
triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gf/cgfazley3l5rdd6ypj2skdwbxoltt6cdo3d3mpt4z3kqywdlsil5.py
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_1 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_10, [1], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/im/cimyq4ncn2lt7wfmnkrejd4t7hctagm26a6wsjpob7y2e6gdebuy.py
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_7 => div_7
# Graph fragment:
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_12, 4), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4q6mh4hv4abtcbirw4vkce7au36jgqitbjdciddckwumhdqyiq.py
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_9 => div_9
# Graph fragment:
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_16, 4), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zl/czlhaiakbynkvvwkmvsahwmxzw7xdccr5ho7bxsxvn4m725ewzpe.py
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_11 => div_11
# Graph fragment:
# %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_20, 4), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zq/czqcd5dzpu5vwblosfp6mrfhziw4van5syy52ogdznuvysvejglt.py
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# Q_14 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_26, 4), kwargs = {})
triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2o/c2oziyswbkb6ooscmnox5xojo4drqahfdvmtgtzz5unwazisafb7.py
# Topologically Sorted Source Nodes: [sum_Q_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_1 => sum_22
# Graph fragment:
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_32,), kwargs = {})
# %mul_tensor_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, 1), kwargs = {})
# %amax_default_12 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_12, [1], True), kwargs = {})
# %sub_tensor_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_12, %amax_default_12), kwargs = {})
# %div_tensor_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_12, 0.1), kwargs = {})
# %mul_tensor_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_26, 1), kwargs = {})
# %amax_default_5 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_5, [1], True), kwargs = {})
# %sub_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_5, %amax_default_5), kwargs = {})
# %div_tensor_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_5, 0.1), kwargs = {})
triton_per_fused_sum_8 = async_compile.triton('triton_per_fused_sum_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + r0), None)
tmp9 = tl.load(in_ptr0 + (16 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (17 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (18 + (4*r2)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (19 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
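# Besides starting the second crop's Sinkhorn pass (sum_Q_1), this kernel
# prepares the cross-entropy logits: it subtracts the row max for numerical
# stability and multiplies by 10.0 = 1/T with temperature T = 0.1, i.e. the
# input to log-softmax in the SwAV loss. Hedged sketch of the loss for one
# (assignment q, prediction out) pair:
def _swav_pair_loss_reference(q, out, temperature=0.1):
    log_p = torch.log_softmax(out / temperature, dim=1)
    return -(q * log_p).sum(dim=1).mean()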
# kernel path: runs/run_shard_0/inductor_cache/gx/cgxhyecrtx7u6qjrmuu7kg45b4dlhwl7jr6ztpezyzreqxtwfd5r.py
# Topologically Sorted Source Nodes: [sum_of_rows_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_3 => sum_23
# Graph fragment:
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_34, [1], True), kwargs = {})
triton_poi_fused_sum_9 = async_compile.triton('triton_poi_fused_sum_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gg/cggk3re7n2vchxwvrujwqfrtrkzz53k2ysszjkctmcorctbnrwjn.py
# Topologically Sorted Source Nodes: [sum_17], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_17 => sum_24
# Graph fragment:
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_38, [0], True), kwargs = {})
triton_poi_fused_sum_10 = async_compile.triton('triton_poi_fused_sum_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (17 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (18 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (19 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
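
# Hedged reference sketch: `triton_poi_fused_sum_10` consumes the per-column
# sums from the previous reduction (in_ptr2), rescales every normalized entry
# by 1/col_sum and by 0.25 (plausibly 1/K for an assumed K = 4), and reduces
# along the contiguous axis. A hypothetical eager-mode equivalent:
def _sketch_fused_sum_10(logits_slice, sum_q, col_sums):
    """logits_slice: [4, 4]; sum_q: scalar; col_sums: [4]. Returns [4]."""
    import torch
    q = torch.exp(logits_slice * 20.0) / sum_q
    q = q / col_sums * 0.25  # divide each column by its sum, then by K
    return q.sum(dim=1)      # per-row sums of the renormalized slice
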
# kernel path: runs/run_shard_0/inductor_cache/ys/cysm4kiitlxgr4z6f4ub5rd2focy27gqcmg2rwqbgxxtuy6mldou.py
# Topologically Sorted Source Nodes: [sum_of_rows_4], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_4 => sum_25
# Graph fragment:
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_42, [1], True), kwargs = {})
triton_poi_fused_sum_11 = async_compile.triton('triton_poi_fused_sum_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
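
# Hedged reference sketch: `triton_poi_fused_sum_11` applies a second
# normalization round -- dividing by the column sums (in_ptr2) and then by
# the row sums from the previous kernel (in_ptr3), each fused with a * 0.25 --
# before recomputing per-column sums. Hypothetical eager-mode equivalent:
def _sketch_fused_sum_11(logits_slice, sum_q, col_sums, row_sums):
    """Returns the refreshed per-column sums, shape [4]."""
    import torch
    q = torch.exp(logits_slice * 20.0) / sum_q
    q = q / col_sums * 0.25               # column normalization round
    q = q / row_sums.unsqueeze(1) * 0.25  # row normalization round
    return q.sum(dim=0)
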
# kernel path: runs/run_shard_0/inductor_cache/d3/cd3gefgcdmhdfyughxzotbsun6fbsyeopog67ravaw3wghfygqhr.py
# Topologically Sorted Source Nodes: [Q_22], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_22 => div_29
# Graph fragment:
# %mul_tensor_27 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 1), kwargs = {})
# %amax_default_27 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_27, [1], True), kwargs = {})
# %sub_tensor_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_27, %amax_default_27), kwargs = {})
# %div_tensor_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_27, 0.1), kwargs = {})
# %div_29 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_44, 4), kwargs = {})
triton_poi_fused_div_12 = async_compile.triton('triton_poi_fused_div_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp32, xmask)
''', device_str='cuda')
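
# Hedged reference sketch: `triton_poi_fused_div_12` fuses two unrelated
# consumers of slice 1. out_ptr0 receives the numerically stable,
# temperature-scaled logits (x - row_max) / 0.1, emitted as a multiply by
# 10.0; out_ptr1 receives the entries after three normalization passes
# (column, row, column), each fused with a * 0.25 -- the final * 0.25 is the
# graph's div-by-4. Hypothetical equivalent:
def _sketch_fused_div_12(logits_slice, sum_q, col_sums, row_sums, col_sums2):
    """Returns (scaled, q) for one [4, 4] slice."""
    import torch
    scaled = (logits_slice - logits_slice.max(dim=1, keepdim=True).values) * 10.0
    q = torch.exp(logits_slice * 20.0) / sum_q
    q = q / col_sums * 0.25
    q = q / row_sums.unsqueeze(1) * 0.25
    q = q / col_sums2 * 0.25
    return scaled, q
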
# kernel path: runs/run_shard_0/inductor_cache/tt/cttg2lnxpoxay4r27e6yymultf5wx23hbwstvoohxvcwcvlv7lne.py
# Topologically Sorted Source Nodes: [sum_Q_2], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_2 => sum_43
# Graph fragment:
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_64,), kwargs = {})
# %mul_tensor_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_27, 1), kwargs = {})
# %amax_default_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_4, [1], True), kwargs = {})
# %sub_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_4, %amax_default_4), kwargs = {})
# %div_tensor_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_4, 0.1), kwargs = {})
triton_per_fused_sum_13 = async_compile.triton('triton_per_fused_sum_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_13(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + r0), None)
tmp9 = tl.load(in_ptr0 + (32 + (4*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (33 + (4*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (34 + (4*r2)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (35 + (4*r2)), None, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
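
# Hedged reference sketch: `triton_per_fused_sum_13` is a persistent
# reduction that starts the same pipeline for the next slice (offset 32):
# it produces the global sum of exp(x * 20.0) and, in the same pass, the
# max-subtracted logits scaled by 1 / 0.1. Hypothetical equivalent:
def _sketch_per_fused_sum_13(logits_slice):
    """Returns (sum_q, scaled) for one [4, 4] slice."""
    import torch
    sum_q = torch.exp(logits_slice * 20.0).sum()
    scaled = (logits_slice - logits_slice.max(dim=1, keepdim=True).values) * 10.0
    return sum_q, scaled
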
# kernel path: runs/run_shard_0/inductor_cache/rs/crsq6qrm3t5shscjihgidrwsneusfut3iqrrk7iex7ld6v54lro4.py
# Topologically Sorted Source Nodes: [sum_of_rows_6], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_6 => sum_44
# Graph fragment:
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_66, [1], True), kwargs = {})
triton_poi_fused_sum_14 = async_compile.triton('triton_poi_fused_sum_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
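
# Note: `triton_poi_fused_sum_14` repeats the reduction of
# `triton_poi_fused_sum_9` on the slice at element offset 32 (slice index 2);
# see the sketch `_sketch_fused_sum_9` above.
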
# kernel path: runs/run_shard_0/inductor_cache/p5/cp5kffayhihgup7sw7fmivxuc6kqm5s27wsgpzsyvv6f5c7cnvv2.py
# Topologically Sorted Source Nodes: [sum_31], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_31 => sum_45
# Graph fragment:
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_70, [0], True), kwargs = {})
triton_poi_fused_sum_15 = async_compile.triton('triton_poi_fused_sum_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (33 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (34 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (35 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
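
# Note: `triton_poi_fused_sum_15` mirrors `triton_poi_fused_sum_10` for the
# slice at offset 32; see `_sketch_fused_sum_10` above.
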
# kernel path: runs/run_shard_0/inductor_cache/ql/cql2xen4uqkei7lbsi5zq5b6xq5fzb7jd63dyiyjcisvyjkosvfv.py
# Topologically Sorted Source Nodes: [sum_of_rows_7], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_7 => sum_46
# Graph fragment:
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_74, [1], True), kwargs = {})
triton_poi_fused_sum_16 = async_compile.triton('triton_poi_fused_sum_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
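
# Note: `triton_poi_fused_sum_16` mirrors `triton_poi_fused_sum_11` for the
# slice at offset 32; see `_sketch_fused_sum_11` above.
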
# kernel path: runs/run_shard_0/inductor_cache/xn/cxnxquhhhxb6dojpjiwgi5c62c3wmitjezayzfjio7k2jv7vqpsd.py
# Topologically Sorted Source Nodes: [Q_37], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_37 => div_51
# Graph fragment:
# %mul_tensor_26 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 1), kwargs = {})
# %amax_default_26 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_26, [1], True), kwargs = {})
# %sub_tensor_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_26, %amax_default_26), kwargs = {})
# %div_tensor_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_26, 0.1), kwargs = {})
# %mul_tensor_19 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, 1), kwargs = {})
# %amax_default_19 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_19, [1], True), kwargs = {})
# %sub_tensor_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_19, %amax_default_19), kwargs = {})
# %div_tensor_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_19, 0.1), kwargs = {})
# %div_51 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_76, 4), kwargs = {})
triton_poi_fused_div_17 = async_compile.triton('triton_poi_fused_div_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp32, xmask)
''', device_str='cuda')
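
# Note: `triton_poi_fused_div_17` mirrors `triton_poi_fused_div_12` for the
# slice at offset 32, except that the scaled logits are written to two
# buffers (out_ptr0 and out_ptr1), presumably so two downstream consumers
# can read them independently; see `_sketch_fused_div_12` above.
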
# kernel path: runs/run_shard_0/inductor_cache/ua/cuanikw4qrep32zrex3uz6h4wr62oltdxm3gxoi2zv2qybounuk4.py
# Topologically Sorted Source Nodes: [sum_Q_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q_3 => sum_64
# Graph fragment:
# %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute_96,), kwargs = {})
triton_per_fused_sum_18 = async_compile.triton('triton_per_fused_sum_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_18(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
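
# Hedged reference sketch: unlike `triton_per_fused_sum_13`, this kernel
# computes only the global sum of exp(x * 20.0) for the slice at offset 48;
# no softmax preparation is fused in. Hypothetical equivalent:
def _sketch_per_fused_sum_18(logits_slice):
    """Global sum of exp(x * 20.0) over one [4, 4] slice."""
    import torch
    return torch.exp(logits_slice * 20.0).sum()
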
# kernel path: runs/run_shard_0/inductor_cache/xx/cxxmu5ippz2osge53d6ltlbfzj363ko2cdlhkuv422dj24x4atll.py
# Topologically Sorted Source Nodes: [sum_of_rows_9], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_9 => sum_65
# Graph fragment:
# %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_98, [1], True), kwargs = {})
triton_poi_fused_sum_19 = async_compile.triton('triton_poi_fused_sum_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
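
# Note: `triton_poi_fused_sum_19` repeats `triton_poi_fused_sum_9` on the
# slice at offset 48 (slice index 3); see `_sketch_fused_sum_9` above.
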
# kernel path: runs/run_shard_0/inductor_cache/iv/civ4enldjkrehizi4iifjk4xmuvxwl4hfaywx27qia42wl4csidk.py
# Topologically Sorted Source Nodes: [sum_45], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_45 => sum_66
# Graph fragment:
# %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_102, [0], True), kwargs = {})
triton_poi_fused_sum_20 = async_compile.triton('triton_poi_fused_sum_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (49 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (50 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (51 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
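
# Note: `triton_poi_fused_sum_20` mirrors `triton_poi_fused_sum_10` for the
# slice at offset 48; see `_sketch_fused_sum_10` above.
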
# kernel path: runs/run_shard_0/inductor_cache/pq/cpqg526fen4aq5cgrdhu4a2oin6acmaawz6mcehy7umciw4im4ix.py
# Topologically Sorted Source Nodes: [sum_of_rows_10], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_10 => sum_67
# Graph fragment:
# %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_106, [1], True), kwargs = {})
triton_poi_fused_sum_21 = async_compile.triton('triton_poi_fused_sum_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
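
# Note: `triton_poi_fused_sum_21` mirrors `triton_poi_fused_sum_11` for the
# slice at offset 48; see `_sketch_fused_sum_11` above.
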
# kernel path: runs/run_shard_0/inductor_cache/n4/cn4plb4lv6xccoz526fpmx4tom4f3ah4sfy35nahqhzq5al6uycs.py
# Topologically Sorted Source Nodes: [Q_52], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_52 => div_73
# Graph fragment:
# %mul_tensor_25 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, 1), kwargs = {})
# %amax_default_25 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_25, [1], True), kwargs = {})
# %sub_tensor_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_25, %amax_default_25), kwargs = {})
# %div_tensor_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_25, 0.1), kwargs = {})
# %mul_tensor_18 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_11, 1), kwargs = {})
# %amax_default_18 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_18, [1], True), kwargs = {})
# %sub_tensor_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_18, %amax_default_18), kwargs = {})
# %div_tensor_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_18, 0.1), kwargs = {})
# %mul_tensor_11 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_19, 1), kwargs = {})
# %amax_default_11 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_11, [1], True), kwargs = {})
# %sub_tensor_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_11, %amax_default_11), kwargs = {})
# %div_tensor_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_11, 0.1), kwargs = {})
# %div_73 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_108, 4), kwargs = {})
triton_poi_fused_div_22 = async_compile.triton('triton_poi_fused_div_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (0))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
tl.store(out_ptr3 + (x2), tmp32, xmask)
''', device_str='cuda')
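
# Note: `triton_poi_fused_div_22` mirrors `triton_poi_fused_div_12` for the
# slice at offset 48, broadcasting the scaled logits into three output
# buffers (out_ptr0..out_ptr2), presumably for three downstream consumers;
# see `_sketch_fused_div_12` above.
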
# kernel path: runs/run_shard_0/inductor_cache/vp/cvpb5mtrsn3w4irxrcazvcy633nic3hupp3lgti6splnbwum5omh.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, 1), kwargs = {})
# %amax_default_24 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_24, [1], True), kwargs = {})
# %sub_tensor_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_24, %amax_default_24), kwargs = {})
# %div_tensor_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_24, 0.1), kwargs = {})
# %mul_tensor_17 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, 1), kwargs = {})
# %amax_default_17 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_17, [1], True), kwargs = {})
# %sub_tensor_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_17, %amax_default_17), kwargs = {})
# %div_tensor_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_17, 0.1), kwargs = {})
# %mul_tensor_10 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_20, 1), kwargs = {})
# %amax_default_10 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_10, [1], True), kwargs = {})
# %sub_tensor_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_10, %amax_default_10), kwargs = {})
# %div_tensor_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_10, 0.1), kwargs = {})
triton_poi_fused_23 = async_compile.triton('triton_poi_fused_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_23(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
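
# Hedged reference sketch: `triton_poi_fused_23` computes only the stable
# softmax preparation (x - row_max) / 0.1 for the slice at offset 0, storing
# the identical result into three buffers. Hypothetical equivalent:
def _sketch_fused_23(logits_slice):
    """Returns three copies of the temperature-scaled, max-centred logits."""
    import torch
    scaled = (logits_slice - logits_slice.max(dim=1, keepdim=True).values) * 10.0
    return scaled, scaled.clone(), scaled.clone()
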
# kernel path: runs/run_shard_0/inductor_cache/eb/cebycglrjxwo5vb3xw77tabmbrro5silpti6e23n5shzjssdcz2y.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_23 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, 1), kwargs = {})
# %amax_default_23 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_23, [1], True), kwargs = {})
# %sub_tensor_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_23, %amax_default_23), kwargs = {})
# %div_tensor_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_23, 0.1), kwargs = {})
# %mul_tensor_16 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, 1), kwargs = {})
# %amax_default_16 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_16, [1], True), kwargs = {})
# %sub_tensor_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_16, %amax_default_16), kwargs = {})
# %div_tensor_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_16, 0.1), kwargs = {})
# %mul_tensor_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_21, 1), kwargs = {})
# %amax_default_9 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_9, [1], True), kwargs = {})
# %sub_tensor_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_9, %amax_default_9), kwargs = {})
# %div_tensor_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_9, 0.1), kwargs = {})
triton_poi_fused_24 = async_compile.triton('triton_poi_fused_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
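
# Note: `triton_poi_fused_24` is identical to `triton_poi_fused_23` but reads
# the slice at offset 16 (slice index 1); see `_sketch_fused_23` above.
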
# kernel path: runs/run_shard_0/inductor_cache/wn/cwnlobrjuzsxonoax4oxycblbqzt6yimsshy6rjao2hwqcckct7e.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_22 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, 1), kwargs = {})
# %amax_default_22 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_22, [1], True), kwargs = {})
# %sub_tensor_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_22, %amax_default_22), kwargs = {})
# %div_tensor_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_22, 0.1), kwargs = {})
# %mul_tensor_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_14, 1), kwargs = {})
# %amax_default_15 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_15, [1], True), kwargs = {})
# %sub_tensor_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_15, %amax_default_15), kwargs = {})
# %div_tensor_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_15, 0.1), kwargs = {})
# %mul_tensor_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_22, 1), kwargs = {})
# %amax_default_8 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_8, [1], True), kwargs = {})
# %sub_tensor_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_8, %amax_default_8), kwargs = {})
# %div_tensor_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_8, 0.1), kwargs = {})
triton_poi_fused_25 = async_compile.triton('triton_poi_fused_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')
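
# Note: `triton_poi_fused_25` is identical to `triton_poi_fused_23` but reads
# the slice at offset 32 (slice index 2); see `_sketch_fused_23` above.
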
# kernel path: runs/run_shard_0/inductor_cache/xb/cxbkq3bw5hyime7xxe4bzxgd2nnpfykwtvk22mjef7aetgsc2zrj.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_21 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, 1), kwargs = {})
# %amax_default_21 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_21, [1], True), kwargs = {})
# %sub_tensor_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_21, %amax_default_21), kwargs = {})
# %div_tensor_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_21, 0.1), kwargs = {})
# %mul_tensor_14 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_15, 1), kwargs = {})
# %amax_default_14 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_14, [1], True), kwargs = {})
# %sub_tensor_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_14, %amax_default_14), kwargs = {})
# %div_tensor_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_14, 0.1), kwargs = {})
# %mul_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_23, 1), kwargs = {})
# %amax_default_7 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_7, [1], True), kwargs = {})
# %sub_tensor_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_7, %amax_default_7), kwargs = {})
# %div_tensor_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_7, 0.1), kwargs = {})
triton_poi_fused_26 = async_compile.triton('triton_poi_fused_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_26(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
tl.store(out_ptr2 + (x2), tmp16, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/pn/cpnagcvhwbqe2rcvfgvmjcaxohah7uzhblpuio2lg6rdtez25o2c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_28, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 0.1), kwargs = {})
triton_poi_fused_27 = async_compile.triton('triton_poi_fused_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_27(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
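
# triton_poi_fused_27 above and triton_poi_fused_28..30 below are the
# single-consumer variants of the same fragment; they differ only in the base
# offset into the contiguous logits buffer (0, 16, 32 and 48 floats, one
# (4, 4) slice each). A hedged sketch of the shared addressing scheme;
# `_reference_slice` is a hypothetical name and `buf` is assumed to be a flat
# float32 tensor:
def _reference_slice(buf, base):
    """Stabilize and scale one contiguous 16-float slice, as these kernels do."""
    x = buf[base:base + 16].view(4, 4)
    x = x - x.amax(dim=1, keepdim=True)
    # Division by the constant 0.1 is strength-reduced to a multiply by its
    # reciprocal 10.0; the result can differ from a true division by at most
    # about one ulp.
    return x * 10.0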
# kernel path: runs/run_shard_0/inductor_cache/iw/ciw3zbnnlvqlllmj5a5gpcprapqy57rowpipu7cpawxbmcstu2i3.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_29, 1), kwargs = {})
# %amax_default_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_2, [1], True), kwargs = {})
# %sub_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_2, %amax_default_2), kwargs = {})
# %div_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_2, 0.1), kwargs = {})
triton_poi_fused_28 = async_compile.triton('triton_poi_fused_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_28(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (18 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (19 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/24/c24vhpuzwkiqxza7n6dhlgskxjaib3f3u2rsi724hw7jp7njwrjn.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_30, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 0.1), kwargs = {})
triton_poi_fused_29 = async_compile.triton('triton_poi_fused_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_29(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (34 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/qb/cqb4xqvf3w6zhnq2pjwk5ebvybb74fgqjxi3x4voqh2rgecnzvjg.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_31, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.1), kwargs = {})
triton_poi_fused_30 = async_compile.triton('triton_poi_fused_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_30(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (49 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (50 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (51 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
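
# Taken together, triton_poi_fused_25..30 look like the fully unrolled form of
# a single per-slice loop over a stacked logits tensor. An assumed eager-mode
# equivalent (hypothetical helper name; shapes inferred from the offsets):
def _reference_unrolled(logits_stack, temperature=0.1):
    """One stabilized, scaled (4, 4) slice per stacked sample."""
    return [
        (sample - sample.amax(dim=1, keepdim=True)) / temperature
        for sample in logits_stack.unbind(0)
    ]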
# kernel path: runs/run_shard_0/inductor_cache/xt/cxtuij5x42zgn4axuvkz6eiv2wtv6voipl2dxqeobqxx2vdpsins.py
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_8, mean, neg, subloss, log_softmax_1, mul_1, sum_9, mean_1, neg_1, subloss_1, log_softmax_2, mul_2, sum_10, mean_2, neg_2, subloss_2, log_softmax_3, mul_3, sum_11, mean_3, neg_3, subloss_3, log_softmax_4, mul_4, sum_12, mean_4, neg_4, subloss_4, log_softmax_5, mul_5, sum_13, mean_5, neg_5, subloss_5, log_softmax_6, mul_6, sum_14, mean_6, neg_6, subloss_6, truediv_8, loss, log_softmax_7, mul_7, sum_22, mean_7, neg_7, subloss_7, log_softmax_8, mul_8, sum_23, mean_8, neg_8, subloss_8, log_softmax_9, mul_9, sum_24, mean_9, neg_9, subloss_9, log_softmax_10, mul_10, sum_25, mean_10, neg_10, subloss_10, log_softmax_11, mul_11, sum_26, mean_11, neg_11, subloss_11, log_softmax_12, mul_12, sum_27, mean_12, neg_12, subloss_12, log_softmax_13, mul_13, sum_28, mean_13, neg_13, subloss_13, truediv_17, loss_1, log_softmax_14, mul_14, sum_36, mean_14, neg_14, subloss_14, log_softmax_15, mul_15, sum_37, mean_15, neg_15, subloss_15, log_softmax_16, mul_16, sum_38, mean_16, neg_16, subloss_16, log_softmax_17, mul_17, sum_39, mean_17, neg_17, subloss_17, log_softmax_18, mul_18, sum_40, mean_18, neg_18, subloss_18, log_softmax_19, mul_19, sum_41, mean_19, neg_19, subloss_19, log_softmax_20, mul_20, sum_42, mean_20, neg_20, subloss_20, truediv_26, loss_2, log_softmax_21, mul_21, sum_50, mean_21, neg_21, subloss_21, log_softmax_22, mul_22, sum_51, mean_22, neg_22, subloss_22, log_softmax_23, mul_23, sum_52, mean_23, neg_23, subloss_23, log_softmax_24, mul_24, sum_53, mean_24, neg_24, subloss_24, log_softmax_25, mul_25, sum_54, mean_25, neg_25, subloss_25, log_softmax_26, mul_26, sum_55, mean_26, neg_26, subloss_26, log_softmax_27, mul_27, sum_56, mean_27, neg_27, subloss_27, truediv_35, loss_3, truediv_36], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# log_softmax => exp_1, log, sub_1, sum_8
# log_softmax_1 => exp_2, log_1, sub_3, sum_10
# log_softmax_10 => exp_12, log_10, sub_21, sum_35
# log_softmax_11 => exp_13, log_11, sub_23, sum_37
# log_softmax_12 => exp_14, log_12, sub_25, sum_39
# log_softmax_13 => exp_15, log_13, sub_27, sum_41
# log_softmax_14 => exp_17, log_14, sub_29, sum_50
# log_softmax_15 => exp_18, log_15, sub_31, sum_52
# log_softmax_16 => exp_19, log_16, sub_33, sum_54
# log_softmax_17 => exp_20, log_17, sub_35, sum_56
# log_softmax_18 => exp_21, log_18, sub_37, sum_58
# log_softmax_19 => exp_22, log_19, sub_39, sum_60
# log_softmax_2 => exp_3, log_2, sub_5, sum_12
# log_softmax_20 => exp_23, log_20, sub_41, sum_62
# log_softmax_21 => exp_25, log_21, sub_43, sum_71
# log_softmax_22 => exp_26, log_22, sub_45, sum_73
# log_softmax_23 => exp_27, log_23, sub_47, sum_75
# log_softmax_24 => exp_28, log_24, sub_49, sum_77
# log_softmax_25 => exp_29, log_25, sub_51, sum_79
# log_softmax_26 => exp_30, log_26, sub_53, sum_81
# log_softmax_27 => exp_31, log_27, sub_55, sum_83
# log_softmax_3 => exp_4, log_3, sub_7, sum_14
# log_softmax_4 => exp_5, log_4, sub_9, sum_16
# log_softmax_5 => exp_6, log_5, sub_11, sum_18
# log_softmax_6 => exp_7, log_6, sub_13, sum_20
# log_softmax_7 => exp_9, log_7, sub_15, sum_29
# log_softmax_8 => exp_10, log_8, sub_17, sum_31
# log_softmax_9 => exp_11, log_9, sub_19, sum_33
# loss => add_7
# loss_1 => add_15
# loss_2 => add_23
# loss_3 => add_31
# mean => mean
# mean_1 => mean_1
# mean_10 => mean_10
# mean_11 => mean_11
# mean_12 => mean_12
# mean_13 => mean_13
# mean_14 => mean_14
# mean_15 => mean_15
# mean_16 => mean_16
# mean_17 => mean_17
# mean_18 => mean_18
# mean_19 => mean_19
# mean_2 => mean_2
# mean_20 => mean_20
# mean_21 => mean_21
# mean_22 => mean_22
# mean_23 => mean_23
# mean_24 => mean_24
# mean_25 => mean_25
# mean_26 => mean_26
# mean_27 => mean_27
# mean_3 => mean_3
# mean_4 => mean_4
# mean_5 => mean_5
# mean_6 => mean_6
# mean_7 => mean_7
# mean_8 => mean_8
# mean_9 => mean_9
# mul => mul_1
# mul_1 => mul_2
# mul_10 => mul_12
# mul_11 => mul_13
# mul_12 => mul_14
# mul_13 => mul_15
# mul_14 => mul_17
# mul_15 => mul_18
# mul_16 => mul_19
# mul_17 => mul_20
# mul_18 => mul_21
# mul_19 => mul_22
# mul_2 => mul_3
# mul_20 => mul_23
# mul_21 => mul_25
# mul_22 => mul_26
# mul_23 => mul_27
# mul_24 => mul_28
# mul_25 => mul_29
# mul_26 => mul_30
# mul_27 => mul_31
# mul_3 => mul_4
# mul_4 => mul_5
# mul_5 => mul_6
# mul_6 => mul_7
# mul_7 => mul_9
# mul_8 => mul_10
# mul_9 => mul_11
# neg => neg
# neg_1 => neg_1
# neg_10 => neg_10
# neg_11 => neg_11
# neg_12 => neg_12
# neg_13 => neg_13
# neg_14 => neg_14
# neg_15 => neg_15
# neg_16 => neg_16
# neg_17 => neg_17
# neg_18 => neg_18
# neg_19 => neg_19
# neg_2 => neg_2
# neg_20 => neg_20
# neg_21 => neg_21
# neg_22 => neg_22
# neg_23 => neg_23
# neg_24 => neg_24
# neg_25 => neg_25
# neg_26 => neg_26
# neg_27 => neg_27
# neg_3 => neg_3
# neg_4 => neg_4
# neg_5 => neg_5
# neg_6 => neg_6
# neg_7 => neg_7
# neg_8 => neg_8
# neg_9 => neg_9
# subloss => add
# subloss_1 => add_1
# subloss_10 => add_11
# subloss_11 => add_12
# subloss_12 => add_13
# subloss_13 => add_14
# subloss_14 => add_16
# subloss_15 => add_17
# subloss_16 => add_18
# subloss_17 => add_19
# subloss_18 => add_20
# subloss_19 => add_21
# subloss_2 => add_2
# subloss_20 => add_22
# subloss_21 => add_24
# subloss_22 => add_25
# subloss_23 => add_26
# subloss_24 => add_27
# subloss_25 => add_28
# subloss_26 => add_29
# subloss_27 => add_30
# subloss_3 => add_3
# subloss_4 => add_4
# subloss_5 => add_5
# subloss_6 => add_6
# subloss_7 => add_8
# subloss_8 => add_9
# subloss_9 => add_10
# sum_10 => sum_13
# sum_11 => sum_15
# sum_12 => sum_17
# sum_13 => sum_19
# sum_14 => sum_21
# sum_22 => sum_30
# sum_23 => sum_32
# sum_24 => sum_34
# sum_25 => sum_36
# sum_26 => sum_38
# sum_27 => sum_40
# sum_28 => sum_42
# sum_36 => sum_51
# sum_37 => sum_53
# sum_38 => sum_55
# sum_39 => sum_57
# sum_40 => sum_59
# sum_41 => sum_61
# sum_42 => sum_63
# sum_50 => sum_72
# sum_51 => sum_74
# sum_52 => sum_76
# sum_53 => sum_78
# sum_54 => sum_80
# sum_55 => sum_82
# sum_56 => sum_84
# sum_8 => sum_9
# sum_9 => sum_11
# truediv_17 => div_43
# truediv_26 => div_65
# truediv_35 => div_87
# truediv_36 => div_88
# truediv_8 => div_21
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_27,), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_8,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_27, %log), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_1), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_9,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 0.0), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_26,), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_10,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_26, %log_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_3), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_11,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %neg_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_25,), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_12,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_25, %log_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_5), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_13,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_2,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %neg_2), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_24,), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_14,), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_24, %log_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_7), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_15,), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %neg_3), kwargs = {})
# %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_23,), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [1], True), kwargs = {})
# %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_16,), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_23, %log_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_9), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_17,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_4,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %neg_4), kwargs = {})
# %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_22,), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [1], True), kwargs = {})
# %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_18,), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_22, %log_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_11), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1]), kwargs = {})
# %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_19,), kwargs = {})
# %neg_5 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_5,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %neg_5), kwargs = {})
# %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_21,), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_7, [1], True), kwargs = {})
# %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_20,), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_21, %log_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_31, %sub_13), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [1]), kwargs = {})
# %mean_6 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_21,), kwargs = {})
# %neg_6 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_6,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %neg_6), kwargs = {})
# %div_21 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_6, 7), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_21, 0.0), kwargs = {})
# %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_20,), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_9, [1], True), kwargs = {})
# %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_29,), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_20, %log_7), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_15), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [1]), kwargs = {})
# %mean_7 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_30,), kwargs = {})
# %neg_7 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_7,), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_7, 0.0), kwargs = {})
# %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_19,), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_10, [1], True), kwargs = {})
# %log_8 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_31,), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_19, %log_8), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_17), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [1]), kwargs = {})
# %mean_8 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_32,), kwargs = {})
# %neg_8 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_8,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %neg_8), kwargs = {})
# %exp_11 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_18,), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_11, [1], True), kwargs = {})
# %log_9 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_33,), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_18, %log_9), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_19), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [1]), kwargs = {})
# %mean_9 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_34,), kwargs = {})
# %neg_9 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_9,), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %neg_9), kwargs = {})
# %exp_12 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_17,), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_12, [1], True), kwargs = {})
# %log_10 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_35,), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_17, %log_10), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_21), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [1]), kwargs = {})
# %mean_10 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_36,), kwargs = {})
# %neg_10 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_10,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %neg_10), kwargs = {})
# %exp_13 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_16,), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_13, [1], True), kwargs = {})
# %log_11 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_37,), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_16, %log_11), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_23), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [1]), kwargs = {})
# %mean_11 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_38,), kwargs = {})
# %neg_11 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_11,), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %neg_11), kwargs = {})
# %exp_14 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_15,), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_14, [1], True), kwargs = {})
# %log_12 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_39,), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_15, %log_12), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_25), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [1]), kwargs = {})
# %mean_12 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_40,), kwargs = {})
# %neg_12 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_12,), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %neg_12), kwargs = {})
# %exp_15 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_14,), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_15, [1], True), kwargs = {})
# %log_13 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_41,), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_14, %log_13), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_63, %sub_27), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [1]), kwargs = {})
# %mean_13 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_42,), kwargs = {})
# %neg_13 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_13,), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %neg_13), kwargs = {})
# %div_43 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_14, 7), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %div_43), kwargs = {})
# %exp_17 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_13,), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_17, [1], True), kwargs = {})
# %log_14 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_50,), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_13, %log_14), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_29), kwargs = {})
# %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [1]), kwargs = {})
# %mean_14 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_51,), kwargs = {})
# %neg_14 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_14,), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_14, 0.0), kwargs = {})
# %exp_18 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_12,), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_18, [1], True), kwargs = {})
# %log_15 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_52,), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_12, %log_15), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_31), kwargs = {})
# %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [1]), kwargs = {})
# %mean_15 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_53,), kwargs = {})
# %neg_15 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_15,), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %neg_15), kwargs = {})
# %exp_19 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_11,), kwargs = {})
# %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_19, [1], True), kwargs = {})
# %log_16 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_54,), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_11, %log_16), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_33), kwargs = {})
# %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [1]), kwargs = {})
# %mean_16 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_55,), kwargs = {})
# %neg_16 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_16,), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %neg_16), kwargs = {})
# %exp_20 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_10,), kwargs = {})
# %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_20, [1], True), kwargs = {})
# %log_17 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_56,), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_10, %log_17), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_35), kwargs = {})
# %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [1]), kwargs = {})
# %mean_17 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_57,), kwargs = {})
# %neg_17 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_17,), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %neg_17), kwargs = {})
# %exp_21 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_9,), kwargs = {})
# %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_21, [1], True), kwargs = {})
# %log_18 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_58,), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_9, %log_18), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_37), kwargs = {})
# %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [1]), kwargs = {})
# %mean_18 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_59,), kwargs = {})
# %neg_18 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_18,), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %neg_18), kwargs = {})
# %exp_22 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_8,), kwargs = {})
# %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_22, [1], True), kwargs = {})
# %log_19 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_60,), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_8, %log_19), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_39), kwargs = {})
# %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [1]), kwargs = {})
# %mean_19 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_61,), kwargs = {})
# %neg_19 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_19,), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %neg_19), kwargs = {})
# %exp_23 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_7,), kwargs = {})
# %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_23, [1], True), kwargs = {})
# %log_20 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_62,), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_7, %log_20), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_95, %sub_41), kwargs = {})
# %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [1]), kwargs = {})
# %mean_20 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_63,), kwargs = {})
# %neg_20 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_20,), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_21, %neg_20), kwargs = {})
# %div_65 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_22, 7), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_15, %div_65), kwargs = {})
# %exp_25 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_6,), kwargs = {})
# %sum_71 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_25, [1], True), kwargs = {})
# %log_21 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_71,), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_6, %log_21), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_43), kwargs = {})
# %sum_72 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [1]), kwargs = {})
# %mean_21 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_72,), kwargs = {})
# %neg_21 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_21,), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_21, 0.0), kwargs = {})
# %exp_26 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_5,), kwargs = {})
# %sum_73 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_26, [1], True), kwargs = {})
# %log_22 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_73,), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_5, %log_22), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_45), kwargs = {})
# %sum_74 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [1]), kwargs = {})
# %mean_22 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_74,), kwargs = {})
# %neg_22 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_22,), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_24, %neg_22), kwargs = {})
# %exp_27 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_4,), kwargs = {})
# %sum_75 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_27, [1], True), kwargs = {})
# %log_23 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_75,), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_4, %log_23), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_47), kwargs = {})
# %sum_76 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [1]), kwargs = {})
# %mean_23 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_76,), kwargs = {})
# %neg_23 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_23,), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %neg_23), kwargs = {})
# %exp_28 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
# %sum_77 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_28, [1], True), kwargs = {})
# %log_24 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_77,), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_3, %log_24), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_49), kwargs = {})
# %sum_78 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [1]), kwargs = {})
# %mean_24 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_78,), kwargs = {})
# %neg_24 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_24,), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %neg_24), kwargs = {})
# %exp_29 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_2,), kwargs = {})
# %sum_79 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_29, [1], True), kwargs = {})
# %log_25 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_79,), kwargs = {})
# %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_2, %log_25), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_51), kwargs = {})
# %sum_80 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [1]), kwargs = {})
# %mean_25 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_80,), kwargs = {})
# %neg_25 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_25,), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_27, %neg_25), kwargs = {})
# %exp_30 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_81 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_30, [1], True), kwargs = {})
# %log_26 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_81,), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log_26), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_53), kwargs = {})
# %sum_82 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [1]), kwargs = {})
# %mean_26 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_82,), kwargs = {})
# %neg_26 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_26,), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %neg_26), kwargs = {})
# %exp_31 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_83 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_31, [1], True), kwargs = {})
# %log_27 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_83,), kwargs = {})
# %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log_27), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_127, %sub_55), kwargs = {})
# %sum_84 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [1]), kwargs = {})
# %mean_27 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_84,), kwargs = {})
# %neg_27 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_27,), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %neg_27), kwargs = {})
# %div_87 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_30, 7), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %div_87), kwargs = {})
# %div_88 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_31, 4), kwargs = {})
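
# Before the kernel itself: the fragment above fuses the entire loss. For each
# of the four permuted target tensors it forms seven soft-target
# cross-entropy terms,
#   subloss_k = -mean_i( sum_j( target[i, j] * log_softmax(scaled_k)[i, j] ) ),
# sums them, divides by 7, accumulates across the four groups, and finally
# divides by 4. A hedged eager-mode sketch with hypothetical names (the real
# kernel reads the 32 pre-scaled input buffers and reduces in a single pass):
import torch.nn.functional as F
def _reference_loss(scaled_logits_groups, targets_groups):
    """scaled_logits_groups: four lists of seven (batch, classes) tensors;
    targets_groups: four (batch, classes) soft-target tensors."""
    loss = 0.0
    for scaled_list, targets in zip(scaled_logits_groups, targets_groups):
        subloss = sum(
            -(targets * F.log_softmax(s, dim=1)).sum(dim=1).mean()
            for s in scaled_list
        )
        loss = loss + subloss / 7
    return loss / 4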
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31 = async_compile.triton('triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: 'i32', 34: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {33: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), equal_to_1=(33,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 128, 'num_reduction': 28, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr4 + (4*r0), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr4 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr4 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr4 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr5 + (4*r0), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr5 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr5 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr5 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp134 = tl.load(in_ptr6 + (4*r0), None, eviction_policy='evict_last')
tmp136 = tl.load(in_ptr6 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp139 = tl.load(in_ptr6 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp142 = tl.load(in_ptr6 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp160 = tl.load(in_ptr7 + (4*r0), None, eviction_policy='evict_last')
tmp162 = tl.load(in_ptr7 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp165 = tl.load(in_ptr7 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp168 = tl.load(in_ptr7 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp186 = tl.load(in_ptr8 + (4*r0), None, eviction_policy='evict_last')
tmp187 = tl.load(in_ptr9 + (4*r0), None, eviction_policy='evict_last')
tmp189 = tl.load(in_ptr9 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp192 = tl.load(in_ptr9 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp195 = tl.load(in_ptr9 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp201 = tl.load(in_ptr8 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp205 = tl.load(in_ptr8 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp209 = tl.load(in_ptr8 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp216 = tl.load(in_ptr10 + (4*r0), None, eviction_policy='evict_last')
tmp218 = tl.load(in_ptr10 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp221 = tl.load(in_ptr10 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp224 = tl.load(in_ptr10 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp242 = tl.load(in_ptr11 + (4*r0), None, eviction_policy='evict_last')
tmp244 = tl.load(in_ptr11 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp247 = tl.load(in_ptr11 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp250 = tl.load(in_ptr11 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp268 = tl.load(in_ptr12 + (4*r0), None, eviction_policy='evict_last')
tmp270 = tl.load(in_ptr12 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp273 = tl.load(in_ptr12 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp276 = tl.load(in_ptr12 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp294 = tl.load(in_ptr13 + (4*r0), None, eviction_policy='evict_last')
tmp296 = tl.load(in_ptr13 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp299 = tl.load(in_ptr13 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp302 = tl.load(in_ptr13 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp320 = tl.load(in_ptr14 + (4*r0), None, eviction_policy='evict_last')
tmp322 = tl.load(in_ptr14 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp325 = tl.load(in_ptr14 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp328 = tl.load(in_ptr14 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp346 = tl.load(in_ptr15 + (4*r0), None, eviction_policy='evict_last')
tmp348 = tl.load(in_ptr15 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp351 = tl.load(in_ptr15 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp354 = tl.load(in_ptr15 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp372 = tl.load(in_ptr16 + (4*r0), None, eviction_policy='evict_last')
tmp373 = tl.load(in_ptr17 + (4*r0), None, eviction_policy='evict_last')
tmp375 = tl.load(in_ptr17 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp378 = tl.load(in_ptr17 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp381 = tl.load(in_ptr17 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp387 = tl.load(in_ptr16 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp391 = tl.load(in_ptr16 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp395 = tl.load(in_ptr16 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp402 = tl.load(in_ptr18 + (4*r0), None, eviction_policy='evict_last')
tmp404 = tl.load(in_ptr18 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp407 = tl.load(in_ptr18 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp410 = tl.load(in_ptr18 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp428 = tl.load(in_ptr19 + (4*r0), None, eviction_policy='evict_last')
tmp430 = tl.load(in_ptr19 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp433 = tl.load(in_ptr19 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp436 = tl.load(in_ptr19 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp454 = tl.load(in_ptr20 + (4*r0), None, eviction_policy='evict_last')
tmp456 = tl.load(in_ptr20 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp459 = tl.load(in_ptr20 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp462 = tl.load(in_ptr20 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp480 = tl.load(in_ptr21 + (4*r0), None, eviction_policy='evict_last')
tmp482 = tl.load(in_ptr21 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp485 = tl.load(in_ptr21 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp488 = tl.load(in_ptr21 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp506 = tl.load(in_ptr22 + (4*r0), None, eviction_policy='evict_last')
tmp508 = tl.load(in_ptr22 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp511 = tl.load(in_ptr22 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp514 = tl.load(in_ptr22 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp532 = tl.load(in_ptr23 + (4*r0), None, eviction_policy='evict_last')
tmp534 = tl.load(in_ptr23 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp537 = tl.load(in_ptr23 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp540 = tl.load(in_ptr23 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp558 = tl.load(in_ptr24 + (4*r0), None, eviction_policy='evict_last')
tmp559 = tl.load(in_ptr25 + (4*r0), None, eviction_policy='evict_last')
tmp561 = tl.load(in_ptr25 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp564 = tl.load(in_ptr25 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp567 = tl.load(in_ptr25 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp573 = tl.load(in_ptr24 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp577 = tl.load(in_ptr24 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp581 = tl.load(in_ptr24 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp588 = tl.load(in_ptr26 + (4*r0), None, eviction_policy='evict_last')
tmp590 = tl.load(in_ptr26 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp593 = tl.load(in_ptr26 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp596 = tl.load(in_ptr26 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp614 = tl.load(in_ptr27 + (4*r0), None, eviction_policy='evict_last')
tmp616 = tl.load(in_ptr27 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp619 = tl.load(in_ptr27 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp622 = tl.load(in_ptr27 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp640 = tl.load(in_ptr28 + (4*r0), None, eviction_policy='evict_last')
tmp642 = tl.load(in_ptr28 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp645 = tl.load(in_ptr28 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp648 = tl.load(in_ptr28 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp666 = tl.load(in_ptr29 + (4*r0), None, eviction_policy='evict_last')
tmp668 = tl.load(in_ptr29 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp671 = tl.load(in_ptr29 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp674 = tl.load(in_ptr29 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp692 = tl.load(in_ptr30 + (4*r0), None, eviction_policy='evict_last')
tmp694 = tl.load(in_ptr30 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp697 = tl.load(in_ptr30 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp700 = tl.load(in_ptr30 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp718 = tl.load(in_ptr31 + (4*r0), None, eviction_policy='evict_last')
tmp720 = tl.load(in_ptr31 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp723 = tl.load(in_ptr31 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp726 = tl.load(in_ptr31 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp30 - tmp41
tmp43 = tmp0 * tmp42
tmp44 = tmp32 - tmp41
tmp45 = tmp15 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp35 - tmp41
tmp48 = tmp19 * tmp47
tmp49 = tmp46 + tmp48
tmp50 = tmp38 - tmp41
tmp51 = tmp23 * tmp50
tmp52 = tmp49 + tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = tl.sum(tmp53, 1)[:, None]
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp56 - tmp67
tmp69 = tmp0 * tmp68
tmp70 = tmp58 - tmp67
tmp71 = tmp15 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp61 - tmp67
tmp74 = tmp19 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp64 - tmp67
tmp77 = tmp23 * tmp76
tmp78 = tmp75 + tmp77
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp83 = tl_math.exp(tmp82)
tmp85 = tl_math.exp(tmp84)
tmp86 = tmp83 + tmp85
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp86 + tmp88
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tl_math.log(tmp92)
tmp94 = tmp82 - tmp93
tmp95 = tmp0 * tmp94
tmp96 = tmp84 - tmp93
tmp97 = tmp15 * tmp96
tmp98 = tmp95 + tmp97
tmp99 = tmp87 - tmp93
tmp100 = tmp19 * tmp99
tmp101 = tmp98 + tmp100
tmp102 = tmp90 - tmp93
tmp103 = tmp23 * tmp102
tmp104 = tmp101 + tmp103
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp107 = tl.sum(tmp105, 1)[:, None]
tmp109 = tl_math.exp(tmp108)
tmp111 = tl_math.exp(tmp110)
tmp112 = tmp109 + tmp111
tmp114 = tl_math.exp(tmp113)
tmp115 = tmp112 + tmp114
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp115 + tmp117
tmp119 = tl_math.log(tmp118)
tmp120 = tmp108 - tmp119
tmp121 = tmp0 * tmp120
tmp122 = tmp110 - tmp119
tmp123 = tmp15 * tmp122
tmp124 = tmp121 + tmp123
tmp125 = tmp113 - tmp119
tmp126 = tmp19 * tmp125
tmp127 = tmp124 + tmp126
tmp128 = tmp116 - tmp119
tmp129 = tmp23 * tmp128
tmp130 = tmp127 + tmp129
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp133 = tl.sum(tmp131, 1)[:, None]
tmp135 = tl_math.exp(tmp134)
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tmp143 = tl_math.exp(tmp142)
tmp144 = tmp141 + tmp143
tmp145 = tl_math.log(tmp144)
tmp146 = tmp134 - tmp145
tmp147 = tmp0 * tmp146
tmp148 = tmp136 - tmp145
tmp149 = tmp15 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp139 - tmp145
tmp152 = tmp19 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp142 - tmp145
tmp155 = tmp23 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.sum(tmp157, 1)[:, None]
tmp161 = tl_math.exp(tmp160)
tmp163 = tl_math.exp(tmp162)
tmp164 = tmp161 + tmp163
tmp166 = tl_math.exp(tmp165)
tmp167 = tmp164 + tmp166
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp167 + tmp169
tmp171 = tl_math.log(tmp170)
tmp172 = tmp160 - tmp171
tmp173 = tmp0 * tmp172
tmp174 = tmp162 - tmp171
tmp175 = tmp15 * tmp174
tmp176 = tmp173 + tmp175
tmp177 = tmp165 - tmp171
tmp178 = tmp19 * tmp177
tmp179 = tmp176 + tmp178
tmp180 = tmp168 - tmp171
tmp181 = tmp23 * tmp180
tmp182 = tmp179 + tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.sum(tmp183, 1)[:, None]
tmp188 = tl_math.exp(tmp187)
tmp190 = tl_math.exp(tmp189)
tmp191 = tmp188 + tmp190
tmp193 = tl_math.exp(tmp192)
tmp194 = tmp191 + tmp193
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp194 + tmp196
tmp198 = tl_math.log(tmp197)
tmp199 = tmp187 - tmp198
tmp200 = tmp186 * tmp199
tmp202 = tmp189 - tmp198
tmp203 = tmp201 * tmp202
tmp204 = tmp200 + tmp203
tmp206 = tmp192 - tmp198
tmp207 = tmp205 * tmp206
tmp208 = tmp204 + tmp207
tmp210 = tmp195 - tmp198
tmp211 = tmp209 * tmp210
tmp212 = tmp208 + tmp211
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp215 = tl.sum(tmp213, 1)[:, None]
tmp217 = tl_math.exp(tmp216)
tmp219 = tl_math.exp(tmp218)
tmp220 = tmp217 + tmp219
tmp222 = tl_math.exp(tmp221)
tmp223 = tmp220 + tmp222
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp223 + tmp225
tmp227 = tl_math.log(tmp226)
tmp228 = tmp216 - tmp227
tmp229 = tmp186 * tmp228
tmp230 = tmp218 - tmp227
tmp231 = tmp201 * tmp230
tmp232 = tmp229 + tmp231
tmp233 = tmp221 - tmp227
tmp234 = tmp205 * tmp233
tmp235 = tmp232 + tmp234
tmp236 = tmp224 - tmp227
tmp237 = tmp209 * tmp236
tmp238 = tmp235 + tmp237
tmp239 = tl.broadcast_to(tmp238, [XBLOCK, RBLOCK])
tmp241 = tl.sum(tmp239, 1)[:, None]
tmp243 = tl_math.exp(tmp242)
tmp245 = tl_math.exp(tmp244)
tmp246 = tmp243 + tmp245
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp246 + tmp248
tmp251 = tl_math.exp(tmp250)
tmp252 = tmp249 + tmp251
tmp253 = tl_math.log(tmp252)
tmp254 = tmp242 - tmp253
tmp255 = tmp186 * tmp254
tmp256 = tmp244 - tmp253
tmp257 = tmp201 * tmp256
tmp258 = tmp255 + tmp257
tmp259 = tmp247 - tmp253
tmp260 = tmp205 * tmp259
tmp261 = tmp258 + tmp260
tmp262 = tmp250 - tmp253
tmp263 = tmp209 * tmp262
tmp264 = tmp261 + tmp263
tmp265 = tl.broadcast_to(tmp264, [XBLOCK, RBLOCK])
tmp267 = tl.sum(tmp265, 1)[:, None]
tmp269 = tl_math.exp(tmp268)
tmp271 = tl_math.exp(tmp270)
tmp272 = tmp269 + tmp271
tmp274 = tl_math.exp(tmp273)
tmp275 = tmp272 + tmp274
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp275 + tmp277
tmp279 = tl_math.log(tmp278)
tmp280 = tmp268 - tmp279
tmp281 = tmp186 * tmp280
tmp282 = tmp270 - tmp279
tmp283 = tmp201 * tmp282
tmp284 = tmp281 + tmp283
tmp285 = tmp273 - tmp279
tmp286 = tmp205 * tmp285
tmp287 = tmp284 + tmp286
tmp288 = tmp276 - tmp279
tmp289 = tmp209 * tmp288
tmp290 = tmp287 + tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.sum(tmp291, 1)[:, None]
tmp295 = tl_math.exp(tmp294)
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp295 + tmp297
tmp300 = tl_math.exp(tmp299)
tmp301 = tmp298 + tmp300
tmp303 = tl_math.exp(tmp302)
tmp304 = tmp301 + tmp303
tmp305 = tl_math.log(tmp304)
tmp306 = tmp294 - tmp305
tmp307 = tmp186 * tmp306
tmp308 = tmp296 - tmp305
tmp309 = tmp201 * tmp308
tmp310 = tmp307 + tmp309
tmp311 = tmp299 - tmp305
tmp312 = tmp205 * tmp311
tmp313 = tmp310 + tmp312
tmp314 = tmp302 - tmp305
tmp315 = tmp209 * tmp314
tmp316 = tmp313 + tmp315
tmp317 = tl.broadcast_to(tmp316, [XBLOCK, RBLOCK])
tmp319 = tl.sum(tmp317, 1)[:, None]
tmp321 = tl_math.exp(tmp320)
tmp323 = tl_math.exp(tmp322)
tmp324 = tmp321 + tmp323
tmp326 = tl_math.exp(tmp325)
tmp327 = tmp324 + tmp326
tmp329 = tl_math.exp(tmp328)
tmp330 = tmp327 + tmp329
tmp331 = tl_math.log(tmp330)
tmp332 = tmp320 - tmp331
tmp333 = tmp186 * tmp332
tmp334 = tmp322 - tmp331
tmp335 = tmp201 * tmp334
tmp336 = tmp333 + tmp335
tmp337 = tmp325 - tmp331
tmp338 = tmp205 * tmp337
tmp339 = tmp336 + tmp338
tmp340 = tmp328 - tmp331
tmp341 = tmp209 * tmp340
tmp342 = tmp339 + tmp341
tmp343 = tl.broadcast_to(tmp342, [XBLOCK, RBLOCK])
tmp345 = tl.sum(tmp343, 1)[:, None]
tmp347 = tl_math.exp(tmp346)
tmp349 = tl_math.exp(tmp348)
tmp350 = tmp347 + tmp349
tmp352 = tl_math.exp(tmp351)
tmp353 = tmp350 + tmp352
tmp355 = tl_math.exp(tmp354)
tmp356 = tmp353 + tmp355
tmp357 = tl_math.log(tmp356)
tmp358 = tmp346 - tmp357
tmp359 = tmp186 * tmp358
tmp360 = tmp348 - tmp357
tmp361 = tmp201 * tmp360
tmp362 = tmp359 + tmp361
tmp363 = tmp351 - tmp357
tmp364 = tmp205 * tmp363
tmp365 = tmp362 + tmp364
tmp366 = tmp354 - tmp357
tmp367 = tmp209 * tmp366
tmp368 = tmp365 + tmp367
tmp369 = tl.broadcast_to(tmp368, [XBLOCK, RBLOCK])
tmp371 = tl.sum(tmp369, 1)[:, None]
tmp374 = tl_math.exp(tmp373)
tmp376 = tl_math.exp(tmp375)
tmp377 = tmp374 + tmp376
tmp379 = tl_math.exp(tmp378)
tmp380 = tmp377 + tmp379
tmp382 = tl_math.exp(tmp381)
tmp383 = tmp380 + tmp382
tmp384 = tl_math.log(tmp383)
tmp385 = tmp373 - tmp384
tmp386 = tmp372 * tmp385
tmp388 = tmp375 - tmp384
tmp389 = tmp387 * tmp388
tmp390 = tmp386 + tmp389
tmp392 = tmp378 - tmp384
tmp393 = tmp391 * tmp392
tmp394 = tmp390 + tmp393
tmp396 = tmp381 - tmp384
tmp397 = tmp395 * tmp396
tmp398 = tmp394 + tmp397
tmp399 = tl.broadcast_to(tmp398, [XBLOCK, RBLOCK])
tmp401 = tl.sum(tmp399, 1)[:, None]
tmp403 = tl_math.exp(tmp402)
tmp405 = tl_math.exp(tmp404)
tmp406 = tmp403 + tmp405
tmp408 = tl_math.exp(tmp407)
tmp409 = tmp406 + tmp408
tmp411 = tl_math.exp(tmp410)
tmp412 = tmp409 + tmp411
tmp413 = tl_math.log(tmp412)
tmp414 = tmp402 - tmp413
tmp415 = tmp372 * tmp414
tmp416 = tmp404 - tmp413
tmp417 = tmp387 * tmp416
tmp418 = tmp415 + tmp417
tmp419 = tmp407 - tmp413
tmp420 = tmp391 * tmp419
tmp421 = tmp418 + tmp420
tmp422 = tmp410 - tmp413
tmp423 = tmp395 * tmp422
tmp424 = tmp421 + tmp423
tmp425 = tl.broadcast_to(tmp424, [XBLOCK, RBLOCK])
tmp427 = tl.sum(tmp425, 1)[:, None]
tmp429 = tl_math.exp(tmp428)
tmp431 = tl_math.exp(tmp430)
tmp432 = tmp429 + tmp431
tmp434 = tl_math.exp(tmp433)
tmp435 = tmp432 + tmp434
tmp437 = tl_math.exp(tmp436)
tmp438 = tmp435 + tmp437
tmp439 = tl_math.log(tmp438)
tmp440 = tmp428 - tmp439
tmp441 = tmp372 * tmp440
tmp442 = tmp430 - tmp439
tmp443 = tmp387 * tmp442
tmp444 = tmp441 + tmp443
tmp445 = tmp433 - tmp439
tmp446 = tmp391 * tmp445
tmp447 = tmp444 + tmp446
tmp448 = tmp436 - tmp439
tmp449 = tmp395 * tmp448
tmp450 = tmp447 + tmp449
tmp451 = tl.broadcast_to(tmp450, [XBLOCK, RBLOCK])
tmp453 = tl.sum(tmp451, 1)[:, None]
tmp455 = tl_math.exp(tmp454)
tmp457 = tl_math.exp(tmp456)
tmp458 = tmp455 + tmp457
tmp460 = tl_math.exp(tmp459)
tmp461 = tmp458 + tmp460
tmp463 = tl_math.exp(tmp462)
tmp464 = tmp461 + tmp463
tmp465 = tl_math.log(tmp464)
tmp466 = tmp454 - tmp465
tmp467 = tmp372 * tmp466
tmp468 = tmp456 - tmp465
tmp469 = tmp387 * tmp468
tmp470 = tmp467 + tmp469
tmp471 = tmp459 - tmp465
tmp472 = tmp391 * tmp471
tmp473 = tmp470 + tmp472
tmp474 = tmp462 - tmp465
tmp475 = tmp395 * tmp474
tmp476 = tmp473 + tmp475
tmp477 = tl.broadcast_to(tmp476, [XBLOCK, RBLOCK])
tmp479 = tl.sum(tmp477, 1)[:, None]
tmp481 = tl_math.exp(tmp480)
tmp483 = tl_math.exp(tmp482)
tmp484 = tmp481 + tmp483
tmp486 = tl_math.exp(tmp485)
tmp487 = tmp484 + tmp486
tmp489 = tl_math.exp(tmp488)
tmp490 = tmp487 + tmp489
tmp491 = tl_math.log(tmp490)
tmp492 = tmp480 - tmp491
tmp493 = tmp372 * tmp492
tmp494 = tmp482 - tmp491
tmp495 = tmp387 * tmp494
tmp496 = tmp493 + tmp495
tmp497 = tmp485 - tmp491
tmp498 = tmp391 * tmp497
tmp499 = tmp496 + tmp498
tmp500 = tmp488 - tmp491
tmp501 = tmp395 * tmp500
tmp502 = tmp499 + tmp501
tmp503 = tl.broadcast_to(tmp502, [XBLOCK, RBLOCK])
tmp505 = tl.sum(tmp503, 1)[:, None]
tmp507 = tl_math.exp(tmp506)
tmp509 = tl_math.exp(tmp508)
tmp510 = tmp507 + tmp509
tmp512 = tl_math.exp(tmp511)
tmp513 = tmp510 + tmp512
tmp515 = tl_math.exp(tmp514)
tmp516 = tmp513 + tmp515
tmp517 = tl_math.log(tmp516)
tmp518 = tmp506 - tmp517
tmp519 = tmp372 * tmp518
tmp520 = tmp508 - tmp517
tmp521 = tmp387 * tmp520
tmp522 = tmp519 + tmp521
tmp523 = tmp511 - tmp517
tmp524 = tmp391 * tmp523
tmp525 = tmp522 + tmp524
tmp526 = tmp514 - tmp517
tmp527 = tmp395 * tmp526
tmp528 = tmp525 + tmp527
tmp529 = tl.broadcast_to(tmp528, [XBLOCK, RBLOCK])
tmp531 = tl.sum(tmp529, 1)[:, None]
tmp533 = tl_math.exp(tmp532)
tmp535 = tl_math.exp(tmp534)
tmp536 = tmp533 + tmp535
tmp538 = tl_math.exp(tmp537)
tmp539 = tmp536 + tmp538
tmp541 = tl_math.exp(tmp540)
tmp542 = tmp539 + tmp541
tmp543 = tl_math.log(tmp542)
tmp544 = tmp532 - tmp543
tmp545 = tmp372 * tmp544
tmp546 = tmp534 - tmp543
tmp547 = tmp387 * tmp546
tmp548 = tmp545 + tmp547
tmp549 = tmp537 - tmp543
tmp550 = tmp391 * tmp549
tmp551 = tmp548 + tmp550
tmp552 = tmp540 - tmp543
tmp553 = tmp395 * tmp552
tmp554 = tmp551 + tmp553
tmp555 = tl.broadcast_to(tmp554, [XBLOCK, RBLOCK])
tmp557 = tl.sum(tmp555, 1)[:, None]
tmp560 = tl_math.exp(tmp559)
tmp562 = tl_math.exp(tmp561)
tmp563 = tmp560 + tmp562
tmp565 = tl_math.exp(tmp564)
tmp566 = tmp563 + tmp565
tmp568 = tl_math.exp(tmp567)
tmp569 = tmp566 + tmp568
tmp570 = tl_math.log(tmp569)
tmp571 = tmp559 - tmp570
tmp572 = tmp558 * tmp571
tmp574 = tmp561 - tmp570
tmp575 = tmp573 * tmp574
tmp576 = tmp572 + tmp575
tmp578 = tmp564 - tmp570
tmp579 = tmp577 * tmp578
tmp580 = tmp576 + tmp579
tmp582 = tmp567 - tmp570
tmp583 = tmp581 * tmp582
tmp584 = tmp580 + tmp583
tmp585 = tl.broadcast_to(tmp584, [XBLOCK, RBLOCK])
tmp587 = tl.sum(tmp585, 1)[:, None]
tmp589 = tl_math.exp(tmp588)
tmp591 = tl_math.exp(tmp590)
tmp592 = tmp589 + tmp591
tmp594 = tl_math.exp(tmp593)
tmp595 = tmp592 + tmp594
tmp597 = tl_math.exp(tmp596)
tmp598 = tmp595 + tmp597
tmp599 = tl_math.log(tmp598)
tmp600 = tmp588 - tmp599
tmp601 = tmp558 * tmp600
tmp602 = tmp590 - tmp599
tmp603 = tmp573 * tmp602
tmp604 = tmp601 + tmp603
tmp605 = tmp593 - tmp599
tmp606 = tmp577 * tmp605
tmp607 = tmp604 + tmp606
tmp608 = tmp596 - tmp599
tmp609 = tmp581 * tmp608
tmp610 = tmp607 + tmp609
tmp611 = tl.broadcast_to(tmp610, [XBLOCK, RBLOCK])
tmp613 = tl.sum(tmp611, 1)[:, None]
tmp615 = tl_math.exp(tmp614)
tmp617 = tl_math.exp(tmp616)
tmp618 = tmp615 + tmp617
tmp620 = tl_math.exp(tmp619)
tmp621 = tmp618 + tmp620
tmp623 = tl_math.exp(tmp622)
tmp624 = tmp621 + tmp623
tmp625 = tl_math.log(tmp624)
tmp626 = tmp614 - tmp625
tmp627 = tmp558 * tmp626
tmp628 = tmp616 - tmp625
tmp629 = tmp573 * tmp628
tmp630 = tmp627 + tmp629
tmp631 = tmp619 - tmp625
tmp632 = tmp577 * tmp631
tmp633 = tmp630 + tmp632
tmp634 = tmp622 - tmp625
tmp635 = tmp581 * tmp634
tmp636 = tmp633 + tmp635
tmp637 = tl.broadcast_to(tmp636, [XBLOCK, RBLOCK])
tmp639 = tl.sum(tmp637, 1)[:, None]
tmp641 = tl_math.exp(tmp640)
tmp643 = tl_math.exp(tmp642)
tmp644 = tmp641 + tmp643
tmp646 = tl_math.exp(tmp645)
tmp647 = tmp644 + tmp646
tmp649 = tl_math.exp(tmp648)
tmp650 = tmp647 + tmp649
tmp651 = tl_math.log(tmp650)
tmp652 = tmp640 - tmp651
tmp653 = tmp558 * tmp652
tmp654 = tmp642 - tmp651
tmp655 = tmp573 * tmp654
tmp656 = tmp653 + tmp655
tmp657 = tmp645 - tmp651
tmp658 = tmp577 * tmp657
tmp659 = tmp656 + tmp658
tmp660 = tmp648 - tmp651
tmp661 = tmp581 * tmp660
tmp662 = tmp659 + tmp661
tmp663 = tl.broadcast_to(tmp662, [XBLOCK, RBLOCK])
tmp665 = tl.sum(tmp663, 1)[:, None]
tmp667 = tl_math.exp(tmp666)
tmp669 = tl_math.exp(tmp668)
tmp670 = tmp667 + tmp669
tmp672 = tl_math.exp(tmp671)
tmp673 = tmp670 + tmp672
tmp675 = tl_math.exp(tmp674)
tmp676 = tmp673 + tmp675
tmp677 = tl_math.log(tmp676)
tmp678 = tmp666 - tmp677
tmp679 = tmp558 * tmp678
tmp680 = tmp668 - tmp677
tmp681 = tmp573 * tmp680
tmp682 = tmp679 + tmp681
tmp683 = tmp671 - tmp677
tmp684 = tmp577 * tmp683
tmp685 = tmp682 + tmp684
tmp686 = tmp674 - tmp677
tmp687 = tmp581 * tmp686
tmp688 = tmp685 + tmp687
tmp689 = tl.broadcast_to(tmp688, [XBLOCK, RBLOCK])
tmp691 = tl.sum(tmp689, 1)[:, None]
tmp693 = tl_math.exp(tmp692)
tmp695 = tl_math.exp(tmp694)
tmp696 = tmp693 + tmp695
tmp698 = tl_math.exp(tmp697)
tmp699 = tmp696 + tmp698
tmp701 = tl_math.exp(tmp700)
tmp702 = tmp699 + tmp701
tmp703 = tl_math.log(tmp702)
tmp704 = tmp692 - tmp703
tmp705 = tmp558 * tmp704
tmp706 = tmp694 - tmp703
tmp707 = tmp573 * tmp706
tmp708 = tmp705 + tmp707
tmp709 = tmp697 - tmp703
tmp710 = tmp577 * tmp709
tmp711 = tmp708 + tmp710
tmp712 = tmp700 - tmp703
tmp713 = tmp581 * tmp712
tmp714 = tmp711 + tmp713
tmp715 = tl.broadcast_to(tmp714, [XBLOCK, RBLOCK])
tmp717 = tl.sum(tmp715, 1)[:, None]
tmp719 = tl_math.exp(tmp718)
tmp721 = tl_math.exp(tmp720)
tmp722 = tmp719 + tmp721
tmp724 = tl_math.exp(tmp723)
tmp725 = tmp722 + tmp724
tmp727 = tl_math.exp(tmp726)
tmp728 = tmp725 + tmp727
tmp729 = tl_math.log(tmp728)
tmp730 = tmp718 - tmp729
tmp731 = tmp558 * tmp730
tmp732 = tmp720 - tmp729
tmp733 = tmp573 * tmp732
tmp734 = tmp731 + tmp733
tmp735 = tmp723 - tmp729
tmp736 = tmp577 * tmp735
tmp737 = tmp734 + tmp736
tmp738 = tmp726 - tmp729
tmp739 = tmp581 * tmp738
tmp740 = tmp737 + tmp739
tmp741 = tl.broadcast_to(tmp740, [XBLOCK, RBLOCK])
tmp743 = tl.sum(tmp741, 1)[:, None]
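    # Epilogue: each of the 28 partial sums above is divided by 4.0 (the mean
    # over the batch), negated, and accumulated in groups of seven; the factor
    # 0.14285714285714285 is 1 / (n_crops - 1) with n_crops = 8, and the final
    # 0.25 averages the four per-crop losses, matching
    # loss / len(high_resolution_outputs) in SwaVLoss.forward below.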
tmp744 = 4.0
tmp745 = tmp587 / tmp744
tmp746 = -tmp745
tmp747 = 0.0
tmp748 = tmp746 + tmp747
tmp749 = tmp613 / tmp744
tmp750 = -tmp749
tmp751 = tmp748 + tmp750
tmp752 = tmp639 / tmp744
tmp753 = -tmp752
tmp754 = tmp751 + tmp753
tmp755 = tmp665 / tmp744
tmp756 = -tmp755
tmp757 = tmp754 + tmp756
tmp758 = tmp691 / tmp744
tmp759 = -tmp758
tmp760 = tmp757 + tmp759
tmp761 = tmp717 / tmp744
tmp762 = -tmp761
tmp763 = tmp760 + tmp762
tmp764 = tmp743 / tmp744
tmp765 = -tmp764
tmp766 = tmp763 + tmp765
tmp767 = 0.14285714285714285
tmp768 = tmp766 * tmp767
tmp769 = tmp401 / tmp744
tmp770 = -tmp769
tmp771 = tmp770 + tmp747
tmp772 = tmp427 / tmp744
tmp773 = -tmp772
tmp774 = tmp771 + tmp773
tmp775 = tmp453 / tmp744
tmp776 = -tmp775
tmp777 = tmp774 + tmp776
tmp778 = tmp479 / tmp744
tmp779 = -tmp778
tmp780 = tmp777 + tmp779
tmp781 = tmp505 / tmp744
tmp782 = -tmp781
tmp783 = tmp780 + tmp782
tmp784 = tmp531 / tmp744
tmp785 = -tmp784
tmp786 = tmp783 + tmp785
tmp787 = tmp557 / tmp744
tmp788 = -tmp787
tmp789 = tmp786 + tmp788
tmp790 = tmp789 * tmp767
tmp791 = tmp215 / tmp744
tmp792 = -tmp791
tmp793 = tmp792 + tmp747
tmp794 = tmp241 / tmp744
tmp795 = -tmp794
tmp796 = tmp793 + tmp795
tmp797 = tmp267 / tmp744
tmp798 = -tmp797
tmp799 = tmp796 + tmp798
tmp800 = tmp293 / tmp744
tmp801 = -tmp800
tmp802 = tmp799 + tmp801
tmp803 = tmp319 / tmp744
tmp804 = -tmp803
tmp805 = tmp802 + tmp804
tmp806 = tmp345 / tmp744
tmp807 = -tmp806
tmp808 = tmp805 + tmp807
tmp809 = tmp371 / tmp744
tmp810 = -tmp809
tmp811 = tmp808 + tmp810
tmp812 = tmp811 * tmp767
tmp813 = tmp29 / tmp744
tmp814 = -tmp813
tmp815 = tmp814 + tmp747
tmp816 = tmp55 / tmp744
tmp817 = -tmp816
tmp818 = tmp815 + tmp817
tmp819 = tmp81 / tmp744
tmp820 = -tmp819
tmp821 = tmp818 + tmp820
tmp822 = tmp107 / tmp744
tmp823 = -tmp822
tmp824 = tmp821 + tmp823
tmp825 = tmp133 / tmp744
tmp826 = -tmp825
tmp827 = tmp824 + tmp826
tmp828 = tmp159 / tmp744
tmp829 = -tmp828
tmp830 = tmp827 + tmp829
tmp831 = tmp185 / tmp744
tmp832 = -tmp831
tmp833 = tmp830 + tmp832
tmp834 = tmp833 * tmp767
tmp835 = tmp768 + tmp747
tmp836 = tmp835 + tmp790
tmp837 = tmp836 + tmp812
tmp838 = tmp837 + tmp834
tmp839 = 0.25
tmp840 = tmp838 * tmp839
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp840, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
triton_poi_fused_sum_1.run(arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
triton_poi_fused_sum_2.run(arg0_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
triton_poi_fused_div_4.run(arg0_1, buf0, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
buf23 = buf0; del buf0 # reuse
buf56 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
buf79 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q_1], Original ATen: [aten.sum]
triton_per_fused_sum_8.run(arg0_1, buf23, buf56, buf79, 1, 16, grid=grid(1), stream=stream0)
buf24 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_3], Original ATen: [aten.sum]
triton_poi_fused_sum_9.run(arg0_1, buf23, buf24, 4, grid=grid(4), stream=stream0)
buf25 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [sum_17], Original ATen: [aten.sum]
triton_poi_fused_sum_10.run(arg0_1, buf23, buf24, buf25, 4, grid=grid(4), stream=stream0)
buf26 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_4], Original ATen: [aten.sum]
triton_poi_fused_sum_11.run(arg0_1, buf23, buf24, buf25, buf26, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_22], Original ATen: [aten.div]
triton_poi_fused_div_12.run(arg0_1, buf23, buf24, buf25, buf26, buf8, buf27, 16, grid=grid(16), stream=stream0)
buf46 = buf23; del buf23 # reuse
buf81 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q_2], Original ATen: [aten.sum]
triton_per_fused_sum_13.run(arg0_1, buf46, buf81, 1, 16, grid=grid(1), stream=stream0)
buf47 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_6], Original ATen: [aten.sum]
triton_poi_fused_sum_14.run(arg0_1, buf46, buf47, 4, grid=grid(4), stream=stream0)
buf48 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [sum_31], Original ATen: [aten.sum]
triton_poi_fused_sum_15.run(arg0_1, buf46, buf47, buf48, 4, grid=grid(4), stream=stream0)
buf49 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_7], Original ATen: [aten.sum]
triton_poi_fused_sum_16.run(arg0_1, buf46, buf47, buf48, buf49, 4, grid=grid(4), stream=stream0)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf50 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_37], Original ATen: [aten.div]
triton_poi_fused_div_17.run(arg0_1, buf46, buf47, buf48, buf49, buf10, buf33, buf50, 16, grid=grid(16), stream=stream0)
buf69 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [sum_Q_3], Original ATen: [aten.sum]
triton_per_fused_sum_18.run(arg0_1, buf69, 1, 16, grid=grid(1), stream=stream0)
buf70 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_9], Original ATen: [aten.sum]
triton_poi_fused_sum_19.run(arg0_1, buf69, buf70, 4, grid=grid(4), stream=stream0)
buf71 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [sum_45], Original ATen: [aten.sum]
triton_poi_fused_sum_20.run(arg0_1, buf69, buf70, buf71, 4, grid=grid(4), stream=stream0)
buf72 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [sum_of_rows_10], Original ATen: [aten.sum]
triton_poi_fused_sum_21.run(arg0_1, buf69, buf70, buf71, buf72, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf58 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf73 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_52], Original ATen: [aten.div]
triton_poi_fused_div_22.run(arg0_1, buf69, buf70, buf71, buf72, buf12, buf35, buf58, buf73, 16, grid=grid(16), stream=stream0)
del buf70
del buf71
del buf72
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_23.run(arg1_1, buf14, buf37, buf60, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_24.run(arg1_1, buf16, buf39, buf62, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf64 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_25.run(arg1_1, buf18, buf41, buf64, 16, grid=grid(16), stream=stream0)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_26.run(arg1_1, buf20, buf43, buf66, 16, grid=grid(16), stream=stream0)
buf28 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_24], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf27, buf28, 16, grid=grid(16), stream=stream0)
buf29 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [Q_26], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf28, buf29, 16, grid=grid(16), stream=stream0)
buf30 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [Q_29], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf29, buf30, 16, grid=grid(16), stream=stream0)
buf31 = reinterpret_tensor(buf29, (4, 4), (4, 1), 0); del buf29 # reuse
buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_23.run(arg0_1, buf31, buf54, buf77, 16, grid=grid(16), stream=stream0)
del arg0_1
buf51 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_39], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf50, buf51, 16, grid=grid(16), stream=stream0)
buf52 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [Q_41], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf51, buf52, 16, grid=grid(16), stream=stream0)
buf53 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [Q_44], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf52, buf53, 16, grid=grid(16), stream=stream0)
buf74 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [Q_54], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf73, buf74, 16, grid=grid(16), stream=stream0)
buf75 = buf73; del buf73 # reuse
# Topologically Sorted Source Nodes: [Q_56], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf74, buf75, 16, grid=grid(16), stream=stream0)
buf76 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [Q_59], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf75, buf76, 16, grid=grid(16), stream=stream0)
buf83 = reinterpret_tensor(buf75, (4, 4), (4, 1), 0); del buf75 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_27.run(arg1_1, buf83, 16, grid=grid(16), stream=stream0)
buf85 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_28.run(arg1_1, buf85, 16, grid=grid(16), stream=stream0)
buf87 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_29.run(arg1_1, buf87, 16, grid=grid(16), stream=stream0)
buf89 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_30.run(arg1_1, buf89, 16, grid=grid(16), stream=stream0)
del arg1_1
buf11 = buf69; del buf69 # reuse
buf22 = buf11; del buf11 # reuse
buf92 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [log_softmax, mul, sum_8, mean, neg, subloss, log_softmax_1, mul_1, sum_9, mean_1, neg_1, subloss_1, log_softmax_2, mul_2, sum_10, mean_2, neg_2, subloss_2, log_softmax_3, mul_3, sum_11, mean_3, neg_3, subloss_3, log_softmax_4, mul_4, sum_12, mean_4, neg_4, subloss_4, log_softmax_5, mul_5, sum_13, mean_5, neg_5, subloss_5, log_softmax_6, mul_6, sum_14, mean_6, neg_6, subloss_6, truediv_8, loss, log_softmax_7, mul_7, sum_22, mean_7, neg_7, subloss_7, log_softmax_8, mul_8, sum_23, mean_8, neg_8, subloss_8, log_softmax_9, mul_9, sum_24, mean_9, neg_9, subloss_9, log_softmax_10, mul_10, sum_25, mean_10, neg_10, subloss_10, log_softmax_11, mul_11, sum_26, mean_11, neg_11, subloss_11, log_softmax_12, mul_12, sum_27, mean_12, neg_12, subloss_12, log_softmax_13, mul_13, sum_28, mean_13, neg_13, subloss_13, truediv_17, loss_1, log_softmax_14, mul_14, sum_36, mean_14, neg_14, subloss_14, log_softmax_15, mul_15, sum_37, mean_15, neg_15, subloss_15, log_softmax_16, mul_16, sum_38, mean_16, neg_16, subloss_16, log_softmax_17, mul_17, sum_39, mean_17, neg_17, subloss_17, log_softmax_18, mul_18, sum_40, mean_18, neg_18, subloss_18, log_softmax_19, mul_19, sum_41, mean_19, neg_19, subloss_19, log_softmax_20, mul_20, sum_42, mean_20, neg_20, subloss_20, truediv_26, loss_2, log_softmax_21, mul_21, sum_50, mean_21, neg_21, subloss_21, log_softmax_22, mul_22, sum_51, mean_22, neg_22, subloss_22, log_softmax_23, mul_23, sum_52, mean_23, neg_23, subloss_23, log_softmax_24, mul_24, sum_53, mean_24, neg_24, subloss_24, log_softmax_25, mul_25, sum_54, mean_25, neg_25, subloss_25, log_softmax_26, mul_26, sum_55, mean_26, neg_26, subloss_26, log_softmax_27, mul_27, sum_56, mean_27, neg_27, subloss_27, truediv_35, loss_3, truediv_36], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.mean, aten.neg, aten.add, aten.div]
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31.run(buf92, buf76, buf77, buf79, buf81, buf83, buf85, buf87, buf89, buf53, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf30, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf7, buf8, buf10, buf12, buf14, buf16, buf18, buf20, 1, 4, grid=grid(1), stream=stream0)
del buf10
del buf12
del buf14
del buf16
del buf18
del buf20
del buf30
del buf31
del buf33
del buf35
del buf37
del buf39
del buf41
del buf43
del buf53
del buf54
del buf56
del buf58
del buf60
del buf62
del buf64
del buf66
del buf7
del buf76
del buf77
del buf79
del buf8
del buf81
del buf83
del buf85
del buf87
del buf89
return (buf92, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
@torch.no_grad()
def sinkhorn(out: 'torch.Tensor', iterations: 'int'=3, epsilon: 'float'=0.05):
"""Distributed sinkhorn algorithm.
As outlined in [0] and implemented in [1].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
[1]: https://github.com/facebookresearch/swav/
Args:
out:
Similarity of the features and the SwaV prototypes.
iterations:
Number of sinkhorn iterations.
epsilon:
Temperature parameter.
Returns:
Soft codes Q assigning each feature to a prototype.
"""
Q = torch.exp(out / epsilon).t()
sum_Q = torch.sum(Q)
Q /= sum_Q
B = Q.shape[1]
K = Q.shape[0]
for i in range(iterations):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
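# Minimal sanity-check sketch (ours, not part of the original module): after
# the final column normalization and `Q *= B`, every row of the returned codes
# sums to 1, so each sample gets a proper distribution over the prototypes.
# The helper name `_check_sinkhorn_codes` is hypothetical, for illustration.
def _check_sinkhorn_codes():
    out = 0.05 * torch.randn(8, 16)  # similarities: 8 samples vs. 16 prototypes
    q = sinkhorn(out, iterations=3, epsilon=0.05)
    assert q.shape == (8, 16)
    # Each row is a distribution over prototypes.
    assert torch.allclose(q.sum(dim=1), torch.ones(8), atol=1e-5)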
class SwaVLoss(nn.Module):
"""Implementation of the SwaV loss.
Attributes:
temperature:
Temperature parameter used for cross entropy calculations.
sinkhorn_iterations:
Number of iterations of the sinkhorn algorithm.
sinkhorn_epsilon:
Temperature parameter used in the sinkhorn algorithm.
"""
    def __init__(self, temperature: 'float'=0.1,
        sinkhorn_iterations: 'int'=3, sinkhorn_epsilon: 'float'=0.05):
super(SwaVLoss, self).__init__()
self.temperature = temperature
self.sinkhorn_iterations = sinkhorn_iterations
self.sinkhorn_epsilon = sinkhorn_epsilon
def subloss(self, z: 'torch.Tensor', q: 'torch.Tensor'):
"""Calculates the cross entropy for the SwaV prediction problem.
Args:
z:
Similarity of the features and the SwaV prototypes.
q:
Codes obtained from Sinkhorn iterations.
Returns:
Cross entropy between predictions z and codes q.
"""
return -torch.mean(torch.sum(q * F.log_softmax(z / self.temperature,
dim=1), dim=1))
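    # Per sample this is the cross entropy H(q, p) with p = softmax(z / T):
    # -sum_k q_k * log p_k, averaged over the batch.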
def forward(self, high_resolution_outputs: 'List[torch.Tensor]',
low_resolution_outputs: 'List[torch.Tensor]'):
"""Computes the SwaV loss for a set of high and low resolution outputs.
Args:
high_resolution_outputs:
List of similarities of features and SwaV prototypes for the
high resolution crops.
low_resolution_outputs:
List of similarities of features and SwaV prototypes for the
low resolution crops.
Returns:
Swapping assignments between views loss (SwaV) as described in [0].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
"""
n_crops = len(high_resolution_outputs) + len(low_resolution_outputs)
loss = 0.0
for i in range(len(high_resolution_outputs)):
with torch.no_grad():
                q = sinkhorn(high_resolution_outputs[i].detach(),
                    iterations=self.sinkhorn_iterations,
                    epsilon=self.sinkhorn_epsilon)
subloss = 0.0
for v in range(len(high_resolution_outputs)):
if v != i:
subloss += self.subloss(high_resolution_outputs[v], q)
for v in range(len(low_resolution_outputs)):
subloss += self.subloss(low_resolution_outputs[v], q)
loss += subloss / (n_crops - 1)
return loss / len(high_resolution_outputs)
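# Hedged usage sketch (ours): each list entry is a (batch, n_prototypes)
# similarity tensor; the crop counts below are illustrative, not from the
# source.
#
#     criterion = SwaVLoss(temperature=0.1)
#     high_res = [torch.randn(32, 128) for _ in range(2)]
#     low_res = [torch.randn(32, 128) for _ in range(4)]
#     loss = criterion(high_res, low_res)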
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
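# Computes sum_Q = sum(exp(out / epsilon)) over the whole first 4x4 view; the
# 20.0 constant below is 1 / epsilon with the default epsilon = 0.05 folded in
# by the compiler. The bare `xoffset + tl.arange(...)` and `tl.full(...)`
# expressions are dead code Inductor emits when the index/mask values are
# unused (the grid launches a single program).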
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
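# First `sum_of_rows` of the sinkhorn loop: normalizes exp(20 * x) by sum_Q
# and reduces over one axis of the first 4x4 view (the stride-4 loads walk the
# reduced dimension), producing one value per output position.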
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
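# The `sum_3` reduction of the first iteration (torch.sum(Q, dim=0)):
# recomputes Q from exp(20 * x), divides by the row sums just produced
# (in_ptr2) and by K = 4 (the 0.25), then reduces along the other axis.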
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
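# `sum_of_rows` for the second sinkhorn iteration; note that Inductor
# recomputes Q from exp(20 * x) on the fly in every kernel rather than
# materializing the intermediate tensor.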
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
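# Materializes Q_7: the elementwise chain exp(20 * x) / sum_Q followed by
# three divide-by-reduction-and-scale-by-0.25 steps using the buffers computed
# above, yielding Q partway through the sinkhorn iterations.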
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
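# Generic sinkhorn normalization on a materialized 4x4 buffer: each element is
# divided by the sum of its contiguous 4-element group, then scaled by 0.25
# (the 1/K and 1/B factors); triton_poi_fused_div_6 below does the same along
# the strided axis.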
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
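# Final sinkhorn step (Q_14): the same normalize-and-scale fused with the
# trailing `Q *= B`; the extra 4.0 cancels the 0.25, so each normalized
# 4-element group ends up summing to 1.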
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
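# Fused kernel for the second view: computes its sum_Q and, in the same pass,
# writes out the numerically stable log-softmax input (x - max(x)) * 10.0,
# where 10.0 = 1 / temperature with the default temperature = 0.1.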
@triton.jit
def triton_per_fused_sum_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + (16 + r0), None)
tmp9 = tl.load(in_ptr0 + (16 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (17 + 4 * r2), None, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (18 + 4 * r2), None, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (19 + 4 * r2), None, eviction_policy='evict_last'
)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp12 = tl.load(in_ptr0 + (17 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + 1)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp21 = tl.load(in_ptr0 + (18 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + 2)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
    tmp30 = tl.load(in_ptr0 + (19 + 4 * x0), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
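# Fuses two things for the second view: the stable log-softmax input
# (x - max(x)) * 10.0 into out_ptr0, and the Q_22 normalization chain
# (exp(20 * x) / sum_Q followed by three divide-and-scale-by-0.25 steps)
# into out_ptr1.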
@triton.jit
def triton_poi_fused_div_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
    tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp32, xmask)
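# Hedged reading: the single-block reduction below starts the sinkhorn
# pipeline for the crop at offset 32 -- out_ptr0 receives sum(exp(z * 20.0))
# over all 16 elements (the global normalizer sum_Q), while out_ptr1 stashes
# the max-shifted, temperature-scaled logits for the cross-entropy term.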
@triton.jit
def triton_per_fused_sum_13(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
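    # The unassigned expressions below are dead code that Inductor emits for
    # single-block reductions: with a one-element grid no x-index or mask is
    # actually needed. The same pattern recurs in the reductions further down.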
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + (32 + r0), None)
tmp9 = tl.load(in_ptr0 + (32 + 4 * r2), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (33 + 4 * r2), None, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (34 + 4 * r2), None, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (35 + 4 * r2), None, eviction_policy='evict_last'
)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp0 * tmp7
tmp10 = tmp9 * tmp7
tmp12 = tmp11 * tmp7
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tmp14 * tmp7
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp18 = tmp17 * tmp7
tmp19 = triton_helpers.maximum(tmp16, tmp18)
tmp20 = tmp8 - tmp19
tmp21 = 10.0
tmp22 = tmp20 * tmp21
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
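# The three pointwise kernels that follow (sum_14 .. sum_16) appear to be the
# alternating column- and row-sum passes of the sinkhorn iteration for the
# crop at offset 32; sum_19 .. sum_21 repeat them for the crop at offset 48.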
@triton.jit
def triton_poi_fused_sum_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (33 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (34 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (35 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp32, xmask)
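# Hedged reading: the reduction below computes the global normalizer
# sum(exp(z * 20.0)) for the crop at offset 48, kicking off its sinkhorn
# passes (sum_19 .. sum_21 below).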
@triton.jit
def triton_per_fused_sum_18(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (49 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + 1)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (50 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (51 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = 20.0
tmp18 = tmp0 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp22 = tmp19 / tmp21
tmp24 = tmp22 / tmp23
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp25
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp25
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
tl.store(out_ptr3 + x2, tmp32, xmask)
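# The four pointwise kernels below appear to compute the log-softmax input
# (z - max(z, dim=-1)) * 10.0 for the crops at offsets 0, 16, 32 and 48 of
# the prediction tensor, fanned out to three destination buffers each (one
# per code source that consumes them in the final reduction).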
@triton.jit
def triton_poi_fused_23(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_26(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
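# Kernels 27 through 30 recompute the same max-shifted, temperature-scaled
# logits per crop, but each writes to a single output buffer.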
@triton.jit
def triton_poi_fused_27(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_28(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (17 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (18 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (19 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_29(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (33 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (34 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (35 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_30(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (49 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (50 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (51 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
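# Hedged reading of the fused reduction below: for each of the 4
# high-resolution crops acting as the code source q (in_ptr0, in_ptr8,
# in_ptr16, in_ptr24), it accumulates sum(q * log_softmax(z), dim=1) against
# the 7 remaining crops' scaled logits, negates and averages over the batch
# of 4, weights each group by 1/7 (0.14285714285714285), and averages the 4
# groups with the final 0.25 -- matching SwaVLossNew.subloss applied pairwise.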
@triton.jit
def triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14,
in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21,
in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28,
in_ptr29, in_ptr30, in_ptr31, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr4 + 4 * r0, None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr4 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr4 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr4 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr5 + 4 * r0, None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr5 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp113 = tl.load(in_ptr5 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp116 = tl.load(in_ptr5 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp134 = tl.load(in_ptr6 + 4 * r0, None, eviction_policy='evict_last')
tmp136 = tl.load(in_ptr6 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp139 = tl.load(in_ptr6 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp142 = tl.load(in_ptr6 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp160 = tl.load(in_ptr7 + 4 * r0, None, eviction_policy='evict_last')
tmp162 = tl.load(in_ptr7 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp165 = tl.load(in_ptr7 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp168 = tl.load(in_ptr7 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp186 = tl.load(in_ptr8 + 4 * r0, None, eviction_policy='evict_last')
tmp187 = tl.load(in_ptr9 + 4 * r0, None, eviction_policy='evict_last')
tmp189 = tl.load(in_ptr9 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp192 = tl.load(in_ptr9 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp195 = tl.load(in_ptr9 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp201 = tl.load(in_ptr8 + (1 + 4 * r0), None, eviction_policy='evict_last'
)
tmp205 = tl.load(in_ptr8 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp209 = tl.load(in_ptr8 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp216 = tl.load(in_ptr10 + 4 * r0, None, eviction_policy='evict_last')
tmp218 = tl.load(in_ptr10 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp221 = tl.load(in_ptr10 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp224 = tl.load(in_ptr10 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp242 = tl.load(in_ptr11 + 4 * r0, None, eviction_policy='evict_last')
tmp244 = tl.load(in_ptr11 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp247 = tl.load(in_ptr11 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp250 = tl.load(in_ptr11 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp268 = tl.load(in_ptr12 + 4 * r0, None, eviction_policy='evict_last')
tmp270 = tl.load(in_ptr12 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp273 = tl.load(in_ptr12 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp276 = tl.load(in_ptr12 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp294 = tl.load(in_ptr13 + 4 * r0, None, eviction_policy='evict_last')
tmp296 = tl.load(in_ptr13 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp299 = tl.load(in_ptr13 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp302 = tl.load(in_ptr13 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp320 = tl.load(in_ptr14 + 4 * r0, None, eviction_policy='evict_last')
tmp322 = tl.load(in_ptr14 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp325 = tl.load(in_ptr14 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp328 = tl.load(in_ptr14 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp346 = tl.load(in_ptr15 + 4 * r0, None, eviction_policy='evict_last')
tmp348 = tl.load(in_ptr15 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp351 = tl.load(in_ptr15 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp354 = tl.load(in_ptr15 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp372 = tl.load(in_ptr16 + 4 * r0, None, eviction_policy='evict_last')
tmp373 = tl.load(in_ptr17 + 4 * r0, None, eviction_policy='evict_last')
tmp375 = tl.load(in_ptr17 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp378 = tl.load(in_ptr17 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp381 = tl.load(in_ptr17 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp387 = tl.load(in_ptr16 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp391 = tl.load(in_ptr16 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp395 = tl.load(in_ptr16 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp402 = tl.load(in_ptr18 + 4 * r0, None, eviction_policy='evict_last')
tmp404 = tl.load(in_ptr18 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp407 = tl.load(in_ptr18 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp410 = tl.load(in_ptr18 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp428 = tl.load(in_ptr19 + 4 * r0, None, eviction_policy='evict_last')
tmp430 = tl.load(in_ptr19 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp433 = tl.load(in_ptr19 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp436 = tl.load(in_ptr19 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp454 = tl.load(in_ptr20 + 4 * r0, None, eviction_policy='evict_last')
tmp456 = tl.load(in_ptr20 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp459 = tl.load(in_ptr20 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp462 = tl.load(in_ptr20 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp480 = tl.load(in_ptr21 + 4 * r0, None, eviction_policy='evict_last')
tmp482 = tl.load(in_ptr21 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp485 = tl.load(in_ptr21 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp488 = tl.load(in_ptr21 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp506 = tl.load(in_ptr22 + 4 * r0, None, eviction_policy='evict_last')
tmp508 = tl.load(in_ptr22 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp511 = tl.load(in_ptr22 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp514 = tl.load(in_ptr22 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp532 = tl.load(in_ptr23 + 4 * r0, None, eviction_policy='evict_last')
tmp534 = tl.load(in_ptr23 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp537 = tl.load(in_ptr23 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp540 = tl.load(in_ptr23 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp558 = tl.load(in_ptr24 + 4 * r0, None, eviction_policy='evict_last')
tmp559 = tl.load(in_ptr25 + 4 * r0, None, eviction_policy='evict_last')
tmp561 = tl.load(in_ptr25 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp564 = tl.load(in_ptr25 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp567 = tl.load(in_ptr25 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp573 = tl.load(in_ptr24 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp577 = tl.load(in_ptr24 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp581 = tl.load(in_ptr24 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp588 = tl.load(in_ptr26 + 4 * r0, None, eviction_policy='evict_last')
tmp590 = tl.load(in_ptr26 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp593 = tl.load(in_ptr26 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp596 = tl.load(in_ptr26 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp614 = tl.load(in_ptr27 + 4 * r0, None, eviction_policy='evict_last')
tmp616 = tl.load(in_ptr27 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp619 = tl.load(in_ptr27 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp622 = tl.load(in_ptr27 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp640 = tl.load(in_ptr28 + 4 * r0, None, eviction_policy='evict_last')
tmp642 = tl.load(in_ptr28 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp645 = tl.load(in_ptr28 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp648 = tl.load(in_ptr28 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp666 = tl.load(in_ptr29 + 4 * r0, None, eviction_policy='evict_last')
tmp668 = tl.load(in_ptr29 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp671 = tl.load(in_ptr29 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp674 = tl.load(in_ptr29 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp692 = tl.load(in_ptr30 + 4 * r0, None, eviction_policy='evict_last')
tmp694 = tl.load(in_ptr30 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp697 = tl.load(in_ptr30 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp700 = tl.load(in_ptr30 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp718 = tl.load(in_ptr31 + 4 * r0, None, eviction_policy='evict_last')
tmp720 = tl.load(in_ptr31 + (1 + 4 * r0), None, eviction_policy=
'evict_last')
tmp723 = tl.load(in_ptr31 + (2 + 4 * r0), None, eviction_policy=
'evict_last')
tmp726 = tl.load(in_ptr31 + (3 + 4 * r0), None, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp31 = tl_math.exp(tmp30)
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tl_math.log(tmp40)
tmp42 = tmp30 - tmp41
tmp43 = tmp0 * tmp42
tmp44 = tmp32 - tmp41
tmp45 = tmp15 * tmp44
tmp46 = tmp43 + tmp45
tmp47 = tmp35 - tmp41
tmp48 = tmp19 * tmp47
tmp49 = tmp46 + tmp48
tmp50 = tmp38 - tmp41
tmp51 = tmp23 * tmp50
tmp52 = tmp49 + tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = tl.sum(tmp53, 1)[:, None]
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp56 - tmp67
tmp69 = tmp0 * tmp68
tmp70 = tmp58 - tmp67
tmp71 = tmp15 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp61 - tmp67
tmp74 = tmp19 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp64 - tmp67
tmp77 = tmp23 * tmp76
tmp78 = tmp75 + tmp77
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp83 = tl_math.exp(tmp82)
tmp85 = tl_math.exp(tmp84)
tmp86 = tmp83 + tmp85
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp86 + tmp88
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tl_math.log(tmp92)
tmp94 = tmp82 - tmp93
tmp95 = tmp0 * tmp94
tmp96 = tmp84 - tmp93
tmp97 = tmp15 * tmp96
tmp98 = tmp95 + tmp97
tmp99 = tmp87 - tmp93
tmp100 = tmp19 * tmp99
tmp101 = tmp98 + tmp100
tmp102 = tmp90 - tmp93
tmp103 = tmp23 * tmp102
tmp104 = tmp101 + tmp103
tmp105 = tl.broadcast_to(tmp104, [XBLOCK, RBLOCK])
tmp107 = tl.sum(tmp105, 1)[:, None]
tmp109 = tl_math.exp(tmp108)
tmp111 = tl_math.exp(tmp110)
tmp112 = tmp109 + tmp111
tmp114 = tl_math.exp(tmp113)
tmp115 = tmp112 + tmp114
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp115 + tmp117
tmp119 = tl_math.log(tmp118)
tmp120 = tmp108 - tmp119
tmp121 = tmp0 * tmp120
tmp122 = tmp110 - tmp119
tmp123 = tmp15 * tmp122
tmp124 = tmp121 + tmp123
tmp125 = tmp113 - tmp119
tmp126 = tmp19 * tmp125
tmp127 = tmp124 + tmp126
tmp128 = tmp116 - tmp119
tmp129 = tmp23 * tmp128
tmp130 = tmp127 + tmp129
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp133 = tl.sum(tmp131, 1)[:, None]
tmp135 = tl_math.exp(tmp134)
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tmp143 = tl_math.exp(tmp142)
tmp144 = tmp141 + tmp143
tmp145 = tl_math.log(tmp144)
tmp146 = tmp134 - tmp145
tmp147 = tmp0 * tmp146
tmp148 = tmp136 - tmp145
tmp149 = tmp15 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp139 - tmp145
tmp152 = tmp19 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp142 - tmp145
tmp155 = tmp23 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tl.broadcast_to(tmp156, [XBLOCK, RBLOCK])
tmp159 = tl.sum(tmp157, 1)[:, None]
tmp161 = tl_math.exp(tmp160)
tmp163 = tl_math.exp(tmp162)
tmp164 = tmp161 + tmp163
tmp166 = tl_math.exp(tmp165)
tmp167 = tmp164 + tmp166
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp167 + tmp169
tmp171 = tl_math.log(tmp170)
tmp172 = tmp160 - tmp171
tmp173 = tmp0 * tmp172
tmp174 = tmp162 - tmp171
tmp175 = tmp15 * tmp174
tmp176 = tmp173 + tmp175
tmp177 = tmp165 - tmp171
tmp178 = tmp19 * tmp177
tmp179 = tmp176 + tmp178
tmp180 = tmp168 - tmp171
tmp181 = tmp23 * tmp180
tmp182 = tmp179 + tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.sum(tmp183, 1)[:, None]
tmp188 = tl_math.exp(tmp187)
tmp190 = tl_math.exp(tmp189)
tmp191 = tmp188 + tmp190
tmp193 = tl_math.exp(tmp192)
tmp194 = tmp191 + tmp193
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp194 + tmp196
tmp198 = tl_math.log(tmp197)
tmp199 = tmp187 - tmp198
tmp200 = tmp186 * tmp199
tmp202 = tmp189 - tmp198
tmp203 = tmp201 * tmp202
tmp204 = tmp200 + tmp203
tmp206 = tmp192 - tmp198
tmp207 = tmp205 * tmp206
tmp208 = tmp204 + tmp207
tmp210 = tmp195 - tmp198
tmp211 = tmp209 * tmp210
tmp212 = tmp208 + tmp211
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp215 = tl.sum(tmp213, 1)[:, None]
tmp217 = tl_math.exp(tmp216)
tmp219 = tl_math.exp(tmp218)
tmp220 = tmp217 + tmp219
tmp222 = tl_math.exp(tmp221)
tmp223 = tmp220 + tmp222
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp223 + tmp225
tmp227 = tl_math.log(tmp226)
tmp228 = tmp216 - tmp227
tmp229 = tmp186 * tmp228
tmp230 = tmp218 - tmp227
tmp231 = tmp201 * tmp230
tmp232 = tmp229 + tmp231
tmp233 = tmp221 - tmp227
tmp234 = tmp205 * tmp233
tmp235 = tmp232 + tmp234
tmp236 = tmp224 - tmp227
tmp237 = tmp209 * tmp236
tmp238 = tmp235 + tmp237
tmp239 = tl.broadcast_to(tmp238, [XBLOCK, RBLOCK])
tmp241 = tl.sum(tmp239, 1)[:, None]
tmp243 = tl_math.exp(tmp242)
tmp245 = tl_math.exp(tmp244)
tmp246 = tmp243 + tmp245
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp246 + tmp248
tmp251 = tl_math.exp(tmp250)
tmp252 = tmp249 + tmp251
tmp253 = tl_math.log(tmp252)
tmp254 = tmp242 - tmp253
tmp255 = tmp186 * tmp254
tmp256 = tmp244 - tmp253
tmp257 = tmp201 * tmp256
tmp258 = tmp255 + tmp257
tmp259 = tmp247 - tmp253
tmp260 = tmp205 * tmp259
tmp261 = tmp258 + tmp260
tmp262 = tmp250 - tmp253
tmp263 = tmp209 * tmp262
tmp264 = tmp261 + tmp263
tmp265 = tl.broadcast_to(tmp264, [XBLOCK, RBLOCK])
tmp267 = tl.sum(tmp265, 1)[:, None]
tmp269 = tl_math.exp(tmp268)
tmp271 = tl_math.exp(tmp270)
tmp272 = tmp269 + tmp271
tmp274 = tl_math.exp(tmp273)
tmp275 = tmp272 + tmp274
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp275 + tmp277
tmp279 = tl_math.log(tmp278)
tmp280 = tmp268 - tmp279
tmp281 = tmp186 * tmp280
tmp282 = tmp270 - tmp279
tmp283 = tmp201 * tmp282
tmp284 = tmp281 + tmp283
tmp285 = tmp273 - tmp279
tmp286 = tmp205 * tmp285
tmp287 = tmp284 + tmp286
tmp288 = tmp276 - tmp279
tmp289 = tmp209 * tmp288
tmp290 = tmp287 + tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.sum(tmp291, 1)[:, None]
tmp295 = tl_math.exp(tmp294)
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp295 + tmp297
tmp300 = tl_math.exp(tmp299)
tmp301 = tmp298 + tmp300
tmp303 = tl_math.exp(tmp302)
tmp304 = tmp301 + tmp303
tmp305 = tl_math.log(tmp304)
tmp306 = tmp294 - tmp305
tmp307 = tmp186 * tmp306
tmp308 = tmp296 - tmp305
tmp309 = tmp201 * tmp308
tmp310 = tmp307 + tmp309
tmp311 = tmp299 - tmp305
tmp312 = tmp205 * tmp311
tmp313 = tmp310 + tmp312
tmp314 = tmp302 - tmp305
tmp315 = tmp209 * tmp314
tmp316 = tmp313 + tmp315
tmp317 = tl.broadcast_to(tmp316, [XBLOCK, RBLOCK])
tmp319 = tl.sum(tmp317, 1)[:, None]
tmp321 = tl_math.exp(tmp320)
tmp323 = tl_math.exp(tmp322)
tmp324 = tmp321 + tmp323
tmp326 = tl_math.exp(tmp325)
tmp327 = tmp324 + tmp326
tmp329 = tl_math.exp(tmp328)
tmp330 = tmp327 + tmp329
tmp331 = tl_math.log(tmp330)
tmp332 = tmp320 - tmp331
tmp333 = tmp186 * tmp332
tmp334 = tmp322 - tmp331
tmp335 = tmp201 * tmp334
tmp336 = tmp333 + tmp335
tmp337 = tmp325 - tmp331
tmp338 = tmp205 * tmp337
tmp339 = tmp336 + tmp338
tmp340 = tmp328 - tmp331
tmp341 = tmp209 * tmp340
tmp342 = tmp339 + tmp341
tmp343 = tl.broadcast_to(tmp342, [XBLOCK, RBLOCK])
tmp345 = tl.sum(tmp343, 1)[:, None]
tmp347 = tl_math.exp(tmp346)
tmp349 = tl_math.exp(tmp348)
tmp350 = tmp347 + tmp349
tmp352 = tl_math.exp(tmp351)
tmp353 = tmp350 + tmp352
tmp355 = tl_math.exp(tmp354)
tmp356 = tmp353 + tmp355
tmp357 = tl_math.log(tmp356)
tmp358 = tmp346 - tmp357
tmp359 = tmp186 * tmp358
tmp360 = tmp348 - tmp357
tmp361 = tmp201 * tmp360
tmp362 = tmp359 + tmp361
tmp363 = tmp351 - tmp357
tmp364 = tmp205 * tmp363
tmp365 = tmp362 + tmp364
tmp366 = tmp354 - tmp357
tmp367 = tmp209 * tmp366
tmp368 = tmp365 + tmp367
tmp369 = tl.broadcast_to(tmp368, [XBLOCK, RBLOCK])
tmp371 = tl.sum(tmp369, 1)[:, None]
tmp374 = tl_math.exp(tmp373)
tmp376 = tl_math.exp(tmp375)
tmp377 = tmp374 + tmp376
tmp379 = tl_math.exp(tmp378)
tmp380 = tmp377 + tmp379
tmp382 = tl_math.exp(tmp381)
tmp383 = tmp380 + tmp382
tmp384 = tl_math.log(tmp383)
tmp385 = tmp373 - tmp384
tmp386 = tmp372 * tmp385
tmp388 = tmp375 - tmp384
tmp389 = tmp387 * tmp388
tmp390 = tmp386 + tmp389
tmp392 = tmp378 - tmp384
tmp393 = tmp391 * tmp392
tmp394 = tmp390 + tmp393
tmp396 = tmp381 - tmp384
tmp397 = tmp395 * tmp396
tmp398 = tmp394 + tmp397
tmp399 = tl.broadcast_to(tmp398, [XBLOCK, RBLOCK])
tmp401 = tl.sum(tmp399, 1)[:, None]
tmp403 = tl_math.exp(tmp402)
tmp405 = tl_math.exp(tmp404)
tmp406 = tmp403 + tmp405
tmp408 = tl_math.exp(tmp407)
tmp409 = tmp406 + tmp408
tmp411 = tl_math.exp(tmp410)
tmp412 = tmp409 + tmp411
tmp413 = tl_math.log(tmp412)
tmp414 = tmp402 - tmp413
tmp415 = tmp372 * tmp414
tmp416 = tmp404 - tmp413
tmp417 = tmp387 * tmp416
tmp418 = tmp415 + tmp417
tmp419 = tmp407 - tmp413
tmp420 = tmp391 * tmp419
tmp421 = tmp418 + tmp420
tmp422 = tmp410 - tmp413
tmp423 = tmp395 * tmp422
tmp424 = tmp421 + tmp423
tmp425 = tl.broadcast_to(tmp424, [XBLOCK, RBLOCK])
tmp427 = tl.sum(tmp425, 1)[:, None]
tmp429 = tl_math.exp(tmp428)
tmp431 = tl_math.exp(tmp430)
tmp432 = tmp429 + tmp431
tmp434 = tl_math.exp(tmp433)
tmp435 = tmp432 + tmp434
tmp437 = tl_math.exp(tmp436)
tmp438 = tmp435 + tmp437
tmp439 = tl_math.log(tmp438)
tmp440 = tmp428 - tmp439
tmp441 = tmp372 * tmp440
tmp442 = tmp430 - tmp439
tmp443 = tmp387 * tmp442
tmp444 = tmp441 + tmp443
tmp445 = tmp433 - tmp439
tmp446 = tmp391 * tmp445
tmp447 = tmp444 + tmp446
tmp448 = tmp436 - tmp439
tmp449 = tmp395 * tmp448
tmp450 = tmp447 + tmp449
tmp451 = tl.broadcast_to(tmp450, [XBLOCK, RBLOCK])
tmp453 = tl.sum(tmp451, 1)[:, None]
tmp455 = tl_math.exp(tmp454)
tmp457 = tl_math.exp(tmp456)
tmp458 = tmp455 + tmp457
tmp460 = tl_math.exp(tmp459)
tmp461 = tmp458 + tmp460
tmp463 = tl_math.exp(tmp462)
tmp464 = tmp461 + tmp463
tmp465 = tl_math.log(tmp464)
tmp466 = tmp454 - tmp465
tmp467 = tmp372 * tmp466
tmp468 = tmp456 - tmp465
tmp469 = tmp387 * tmp468
tmp470 = tmp467 + tmp469
tmp471 = tmp459 - tmp465
tmp472 = tmp391 * tmp471
tmp473 = tmp470 + tmp472
tmp474 = tmp462 - tmp465
tmp475 = tmp395 * tmp474
tmp476 = tmp473 + tmp475
tmp477 = tl.broadcast_to(tmp476, [XBLOCK, RBLOCK])
tmp479 = tl.sum(tmp477, 1)[:, None]
tmp481 = tl_math.exp(tmp480)
tmp483 = tl_math.exp(tmp482)
tmp484 = tmp481 + tmp483
tmp486 = tl_math.exp(tmp485)
tmp487 = tmp484 + tmp486
tmp489 = tl_math.exp(tmp488)
tmp490 = tmp487 + tmp489
tmp491 = tl_math.log(tmp490)
tmp492 = tmp480 - tmp491
tmp493 = tmp372 * tmp492
tmp494 = tmp482 - tmp491
tmp495 = tmp387 * tmp494
tmp496 = tmp493 + tmp495
tmp497 = tmp485 - tmp491
tmp498 = tmp391 * tmp497
tmp499 = tmp496 + tmp498
tmp500 = tmp488 - tmp491
tmp501 = tmp395 * tmp500
tmp502 = tmp499 + tmp501
tmp503 = tl.broadcast_to(tmp502, [XBLOCK, RBLOCK])
tmp505 = tl.sum(tmp503, 1)[:, None]
tmp507 = tl_math.exp(tmp506)
tmp509 = tl_math.exp(tmp508)
tmp510 = tmp507 + tmp509
tmp512 = tl_math.exp(tmp511)
tmp513 = tmp510 + tmp512
tmp515 = tl_math.exp(tmp514)
tmp516 = tmp513 + tmp515
tmp517 = tl_math.log(tmp516)
tmp518 = tmp506 - tmp517
tmp519 = tmp372 * tmp518
tmp520 = tmp508 - tmp517
tmp521 = tmp387 * tmp520
tmp522 = tmp519 + tmp521
tmp523 = tmp511 - tmp517
tmp524 = tmp391 * tmp523
tmp525 = tmp522 + tmp524
tmp526 = tmp514 - tmp517
tmp527 = tmp395 * tmp526
tmp528 = tmp525 + tmp527
tmp529 = tl.broadcast_to(tmp528, [XBLOCK, RBLOCK])
tmp531 = tl.sum(tmp529, 1)[:, None]
tmp533 = tl_math.exp(tmp532)
tmp535 = tl_math.exp(tmp534)
tmp536 = tmp533 + tmp535
tmp538 = tl_math.exp(tmp537)
tmp539 = tmp536 + tmp538
tmp541 = tl_math.exp(tmp540)
tmp542 = tmp539 + tmp541
tmp543 = tl_math.log(tmp542)
tmp544 = tmp532 - tmp543
tmp545 = tmp372 * tmp544
tmp546 = tmp534 - tmp543
tmp547 = tmp387 * tmp546
tmp548 = tmp545 + tmp547
tmp549 = tmp537 - tmp543
tmp550 = tmp391 * tmp549
tmp551 = tmp548 + tmp550
tmp552 = tmp540 - tmp543
tmp553 = tmp395 * tmp552
tmp554 = tmp551 + tmp553
tmp555 = tl.broadcast_to(tmp554, [XBLOCK, RBLOCK])
tmp557 = tl.sum(tmp555, 1)[:, None]
tmp560 = tl_math.exp(tmp559)
tmp562 = tl_math.exp(tmp561)
tmp563 = tmp560 + tmp562
tmp565 = tl_math.exp(tmp564)
tmp566 = tmp563 + tmp565
tmp568 = tl_math.exp(tmp567)
tmp569 = tmp566 + tmp568
tmp570 = tl_math.log(tmp569)
tmp571 = tmp559 - tmp570
tmp572 = tmp558 * tmp571
tmp574 = tmp561 - tmp570
tmp575 = tmp573 * tmp574
tmp576 = tmp572 + tmp575
tmp578 = tmp564 - tmp570
tmp579 = tmp577 * tmp578
tmp580 = tmp576 + tmp579
tmp582 = tmp567 - tmp570
tmp583 = tmp581 * tmp582
tmp584 = tmp580 + tmp583
tmp585 = tl.broadcast_to(tmp584, [XBLOCK, RBLOCK])
tmp587 = tl.sum(tmp585, 1)[:, None]
tmp589 = tl_math.exp(tmp588)
tmp591 = tl_math.exp(tmp590)
tmp592 = tmp589 + tmp591
tmp594 = tl_math.exp(tmp593)
tmp595 = tmp592 + tmp594
tmp597 = tl_math.exp(tmp596)
tmp598 = tmp595 + tmp597
tmp599 = tl_math.log(tmp598)
tmp600 = tmp588 - tmp599
tmp601 = tmp558 * tmp600
tmp602 = tmp590 - tmp599
tmp603 = tmp573 * tmp602
tmp604 = tmp601 + tmp603
tmp605 = tmp593 - tmp599
tmp606 = tmp577 * tmp605
tmp607 = tmp604 + tmp606
tmp608 = tmp596 - tmp599
tmp609 = tmp581 * tmp608
tmp610 = tmp607 + tmp609
tmp611 = tl.broadcast_to(tmp610, [XBLOCK, RBLOCK])
tmp613 = tl.sum(tmp611, 1)[:, None]
tmp615 = tl_math.exp(tmp614)
tmp617 = tl_math.exp(tmp616)
tmp618 = tmp615 + tmp617
tmp620 = tl_math.exp(tmp619)
tmp621 = tmp618 + tmp620
tmp623 = tl_math.exp(tmp622)
tmp624 = tmp621 + tmp623
tmp625 = tl_math.log(tmp624)
tmp626 = tmp614 - tmp625
tmp627 = tmp558 * tmp626
tmp628 = tmp616 - tmp625
tmp629 = tmp573 * tmp628
tmp630 = tmp627 + tmp629
tmp631 = tmp619 - tmp625
tmp632 = tmp577 * tmp631
tmp633 = tmp630 + tmp632
tmp634 = tmp622 - tmp625
tmp635 = tmp581 * tmp634
tmp636 = tmp633 + tmp635
tmp637 = tl.broadcast_to(tmp636, [XBLOCK, RBLOCK])
tmp639 = tl.sum(tmp637, 1)[:, None]
tmp641 = tl_math.exp(tmp640)
tmp643 = tl_math.exp(tmp642)
tmp644 = tmp641 + tmp643
tmp646 = tl_math.exp(tmp645)
tmp647 = tmp644 + tmp646
tmp649 = tl_math.exp(tmp648)
tmp650 = tmp647 + tmp649
tmp651 = tl_math.log(tmp650)
tmp652 = tmp640 - tmp651
tmp653 = tmp558 * tmp652
tmp654 = tmp642 - tmp651
tmp655 = tmp573 * tmp654
tmp656 = tmp653 + tmp655
tmp657 = tmp645 - tmp651
tmp658 = tmp577 * tmp657
tmp659 = tmp656 + tmp658
tmp660 = tmp648 - tmp651
tmp661 = tmp581 * tmp660
tmp662 = tmp659 + tmp661
tmp663 = tl.broadcast_to(tmp662, [XBLOCK, RBLOCK])
tmp665 = tl.sum(tmp663, 1)[:, None]
tmp667 = tl_math.exp(tmp666)
tmp669 = tl_math.exp(tmp668)
tmp670 = tmp667 + tmp669
tmp672 = tl_math.exp(tmp671)
tmp673 = tmp670 + tmp672
tmp675 = tl_math.exp(tmp674)
tmp676 = tmp673 + tmp675
tmp677 = tl_math.log(tmp676)
tmp678 = tmp666 - tmp677
tmp679 = tmp558 * tmp678
tmp680 = tmp668 - tmp677
tmp681 = tmp573 * tmp680
tmp682 = tmp679 + tmp681
tmp683 = tmp671 - tmp677
tmp684 = tmp577 * tmp683
tmp685 = tmp682 + tmp684
tmp686 = tmp674 - tmp677
tmp687 = tmp581 * tmp686
tmp688 = tmp685 + tmp687
tmp689 = tl.broadcast_to(tmp688, [XBLOCK, RBLOCK])
tmp691 = tl.sum(tmp689, 1)[:, None]
tmp693 = tl_math.exp(tmp692)
tmp695 = tl_math.exp(tmp694)
tmp696 = tmp693 + tmp695
tmp698 = tl_math.exp(tmp697)
tmp699 = tmp696 + tmp698
tmp701 = tl_math.exp(tmp700)
tmp702 = tmp699 + tmp701
tmp703 = tl_math.log(tmp702)
tmp704 = tmp692 - tmp703
tmp705 = tmp558 * tmp704
tmp706 = tmp694 - tmp703
tmp707 = tmp573 * tmp706
tmp708 = tmp705 + tmp707
tmp709 = tmp697 - tmp703
tmp710 = tmp577 * tmp709
tmp711 = tmp708 + tmp710
tmp712 = tmp700 - tmp703
tmp713 = tmp581 * tmp712
tmp714 = tmp711 + tmp713
tmp715 = tl.broadcast_to(tmp714, [XBLOCK, RBLOCK])
tmp717 = tl.sum(tmp715, 1)[:, None]
tmp719 = tl_math.exp(tmp718)
tmp721 = tl_math.exp(tmp720)
tmp722 = tmp719 + tmp721
tmp724 = tl_math.exp(tmp723)
tmp725 = tmp722 + tmp724
tmp727 = tl_math.exp(tmp726)
tmp728 = tmp725 + tmp727
tmp729 = tl_math.log(tmp728)
tmp730 = tmp718 - tmp729
tmp731 = tmp558 * tmp730
tmp732 = tmp720 - tmp729
tmp733 = tmp573 * tmp732
tmp734 = tmp731 + tmp733
tmp735 = tmp723 - tmp729
tmp736 = tmp577 * tmp735
tmp737 = tmp734 + tmp736
tmp738 = tmp726 - tmp729
tmp739 = tmp581 * tmp738
tmp740 = tmp737 + tmp739
tmp741 = tl.broadcast_to(tmp740, [XBLOCK, RBLOCK])
tmp743 = tl.sum(tmp741, 1)[:, None]
tmp744 = 4.0
tmp745 = tmp587 / tmp744
tmp746 = -tmp745
tmp747 = 0.0
tmp748 = tmp746 + tmp747
tmp749 = tmp613 / tmp744
tmp750 = -tmp749
tmp751 = tmp748 + tmp750
tmp752 = tmp639 / tmp744
tmp753 = -tmp752
tmp754 = tmp751 + tmp753
tmp755 = tmp665 / tmp744
tmp756 = -tmp755
tmp757 = tmp754 + tmp756
tmp758 = tmp691 / tmp744
tmp759 = -tmp758
tmp760 = tmp757 + tmp759
tmp761 = tmp717 / tmp744
tmp762 = -tmp761
tmp763 = tmp760 + tmp762
tmp764 = tmp743 / tmp744
tmp765 = -tmp764
tmp766 = tmp763 + tmp765
tmp767 = 0.14285714285714285
tmp768 = tmp766 * tmp767
tmp769 = tmp401 / tmp744
tmp770 = -tmp769
tmp771 = tmp770 + tmp747
tmp772 = tmp427 / tmp744
tmp773 = -tmp772
tmp774 = tmp771 + tmp773
tmp775 = tmp453 / tmp744
tmp776 = -tmp775
tmp777 = tmp774 + tmp776
tmp778 = tmp479 / tmp744
tmp779 = -tmp778
tmp780 = tmp777 + tmp779
tmp781 = tmp505 / tmp744
tmp782 = -tmp781
tmp783 = tmp780 + tmp782
tmp784 = tmp531 / tmp744
tmp785 = -tmp784
tmp786 = tmp783 + tmp785
tmp787 = tmp557 / tmp744
tmp788 = -tmp787
tmp789 = tmp786 + tmp788
tmp790 = tmp789 * tmp767
tmp791 = tmp215 / tmp744
tmp792 = -tmp791
tmp793 = tmp792 + tmp747
tmp794 = tmp241 / tmp744
tmp795 = -tmp794
tmp796 = tmp793 + tmp795
tmp797 = tmp267 / tmp744
tmp798 = -tmp797
tmp799 = tmp796 + tmp798
tmp800 = tmp293 / tmp744
tmp801 = -tmp800
tmp802 = tmp799 + tmp801
tmp803 = tmp319 / tmp744
tmp804 = -tmp803
tmp805 = tmp802 + tmp804
tmp806 = tmp345 / tmp744
tmp807 = -tmp806
tmp808 = tmp805 + tmp807
tmp809 = tmp371 / tmp744
tmp810 = -tmp809
tmp811 = tmp808 + tmp810
tmp812 = tmp811 * tmp767
tmp813 = tmp29 / tmp744
tmp814 = -tmp813
tmp815 = tmp814 + tmp747
tmp816 = tmp55 / tmp744
tmp817 = -tmp816
tmp818 = tmp815 + tmp817
tmp819 = tmp81 / tmp744
tmp820 = -tmp819
tmp821 = tmp818 + tmp820
tmp822 = tmp107 / tmp744
tmp823 = -tmp822
tmp824 = tmp821 + tmp823
tmp825 = tmp133 / tmp744
tmp826 = -tmp825
tmp827 = tmp824 + tmp826
tmp828 = tmp159 / tmp744
tmp829 = -tmp828
tmp830 = tmp827 + tmp829
tmp831 = tmp185 / tmp744
tmp832 = -tmp831
tmp833 = tmp830 + tmp832
tmp834 = tmp833 * tmp767
tmp835 = tmp768 + tmp747
tmp836 = tmp835 + tmp790
tmp837 = tmp836 + tmp812
tmp838 = tmp837 + tmp834
tmp839 = 0.25
tmp840 = tmp838 * tmp839
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp840, None)
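# call() below orchestrates the kernels: sinkhorn normalization passes for
# each high-resolution crop, scaled-logit preparation for every crop, and
# the final fused cross-entropy reduction into the scalar buffer buf92.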
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_mul_7[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = buf0
del buf0
buf56 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
buf79 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sum_8[grid(1)](arg0_1, buf23, buf56, buf79, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf24 = buf3
del buf3
triton_poi_fused_sum_9[grid(4)](arg0_1, buf23, buf24, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf25 = buf2
del buf2
triton_poi_fused_sum_10[grid(4)](arg0_1, buf23, buf24, buf25, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf26 = buf1
del buf1
triton_poi_fused_sum_11[grid(4)](arg0_1, buf23, buf24, buf25, buf26,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_12[grid(16)](arg0_1, buf23, buf24, buf25,
buf26, buf8, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf46 = buf23
del buf23
buf81 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_sum_13[grid(1)](arg0_1, buf46, buf81, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf47 = buf26
del buf26
triton_poi_fused_sum_14[grid(4)](arg0_1, buf46, buf47, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf48 = buf25
del buf25
triton_poi_fused_sum_15[grid(4)](arg0_1, buf46, buf47, buf48, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf49 = buf24
del buf24
triton_poi_fused_sum_16[grid(4)](arg0_1, buf46, buf47, buf48, buf49,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf50 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_17[grid(16)](arg0_1, buf46, buf47, buf48,
buf49, buf10, buf33, buf50, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf69 = buf46
del buf46
triton_per_fused_sum_18[grid(1)](arg0_1, buf69, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf70 = buf49
del buf49
triton_poi_fused_sum_19[grid(4)](arg0_1, buf69, buf70, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf71 = buf48
del buf48
triton_poi_fused_sum_20[grid(4)](arg0_1, buf69, buf70, buf71, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf72 = buf47
del buf47
triton_poi_fused_sum_21[grid(4)](arg0_1, buf69, buf70, buf71, buf72,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf58 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf73 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_22[grid(16)](arg0_1, buf69, buf70, buf71,
buf72, buf12, buf35, buf58, buf73, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf70
del buf71
del buf72
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf60 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_23[grid(16)](arg1_1, buf14, buf37, buf60, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_24[grid(16)](arg1_1, buf16, buf39, buf62, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf64 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_25[grid(16)](arg1_1, buf18, buf41, buf64, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_26[grid(16)](arg1_1, buf20, buf43, buf66, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf27, buf28, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf29 = buf27
del buf27
triton_poi_fused_div_6[grid(16)](buf28, buf29, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf30 = buf28
del buf28
triton_poi_fused_mul_7[grid(16)](buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf31 = reinterpret_tensor(buf29, (4, 4), (4, 1), 0)
del buf29
buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_23[grid(16)](arg0_1, buf31, buf54, buf77, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf51 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf50, buf51, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf52 = buf50
del buf50
triton_poi_fused_div_6[grid(16)](buf51, buf52, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf53 = buf51
del buf51
triton_poi_fused_mul_7[grid(16)](buf52, buf53, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf74 = buf52
del buf52
triton_poi_fused_div_5[grid(16)](buf73, buf74, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf75 = buf73
del buf73
triton_poi_fused_div_6[grid(16)](buf74, buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf76 = buf74
del buf74
triton_poi_fused_mul_7[grid(16)](buf75, buf76, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf83 = reinterpret_tensor(buf75, (4, 4), (4, 1), 0)
del buf75
triton_poi_fused_27[grid(16)](arg1_1, buf83, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf85 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_28[grid(16)](arg1_1, buf85, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf87 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_29[grid(16)](arg1_1, buf87, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_30[grid(16)](arg1_1, buf89, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf11 = buf69
del buf69
buf22 = buf11
del buf11
buf92 = buf22
del buf22
triton_per_fused__log_softmax_add_div_mean_mul_neg_sum_31[grid(1)](
buf92, buf76, buf77, buf79, buf81, buf83, buf85, buf87, buf89,
buf53, buf54, buf56, buf58, buf60, buf62, buf64, buf66, buf30,
buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf7, buf8,
buf10, buf12, buf14, buf16, buf18, buf20, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf10
del buf12
del buf14
del buf16
del buf18
del buf20
del buf30
del buf31
del buf33
del buf35
del buf37
del buf39
del buf41
del buf43
del buf53
del buf54
del buf56
del buf58
del buf60
del buf62
del buf64
del buf66
del buf7
del buf76
del buf77
del buf79
del buf8
del buf81
del buf83
del buf85
del buf87
del buf89
return buf92,
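# Added commentary (hedged): the per-crop division, mul and sum kernels above
# feed this final fused reduction, which combines the
# q * log_softmax(z / temperature) subloss terms across crop pairs into the
# single scalar loss returned as buf92; buffer numbering follows the Inductor
# scheduler and has no meaning outside this compiled graph.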
@torch.no_grad()
def sinkhorn(out: 'torch.Tensor', iterations: 'int'=3, epsilon: 'float'=0.05):
"""Distributed sinkhorn algorithm.
As outlined in [0] and implemented in [1].
[0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
[1]: https://github.com/facebookresearch/swav/
Args:
out:
Similarity of the features and the SwaV prototypes.
iterations:
Number of sinkhorn iterations.
epsilon:
Temperature parameter.
Returns:
Soft codes Q assigning each feature to a prototype.
"""
Q = torch.exp(out / epsilon).t()
sum_Q = torch.sum(Q)
Q /= sum_Q
B = Q.shape[1]
K = Q.shape[0]
for i in range(iterations):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
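# Minimal sketch (added for exposition; shapes are assumed, not taken from the
# original source): after the iterations above, every column of Q sums to 1/B
# and is then rescaled by B, so each row of the returned codes sums to 1.
def _sinkhorn_demo():
    out = torch.randn(8, 4)          # 8 features against 4 prototypes
    q = sinkhorn(out, iterations=3)  # soft codes, shape (8, 4)
    return torch.allclose(q.sum(dim=1), torch.ones(8), atol=1e-4)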
class SwaVLossNew(nn.Module):
"""Implementation of the SwaV loss.
Attributes:
temperature:
Temperature parameter used for cross entropy calculations.
sinkhorn_iterations:
Number of iterations of the sinkhorn algorithm.
sinkhorn_epsilon:
Temperature parameter used in the sinkhorn algorithm.
"""
def __init__(self, temperature: 'float'=0.1, sinkhorn_iterations: 'int'
=3, sinkhorn_epsilon: 'float'=0.05):
super(SwaVLossNew, self).__init__()
self.temperature = temperature
self.sinkhorn_iterations = sinkhorn_iterations
self.sinkhorn_epsilon = sinkhorn_epsilon
def subloss(self, z: 'torch.Tensor', q: 'torch.Tensor'):
"""Calculates the cross entropy for the SwaV prediction problem.
Args:
z:
Similarity of the features and the SwaV prototypes.
q:
Codes obtained from Sinkhorn iterations.
Returns:
Cross entropy between predictions z and codes q.
"""
return -torch.mean(torch.sum(q * F.log_softmax(z / self.temperature,
dim=1), dim=1))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| lightly-ai/lightly | SwaVLoss | false | 16,050 | [
"MIT"
] | 1,515 | 0b98bda640d13d842fd13f9354271d0cef116ba5 | https://github.com/lightly-ai/lightly/tree/0b98bda640d13d842fd13f9354271d0cef116ba5 |
Lookahead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ak/caknpdma3ayvuighuunzadvdrg25ugwh3fdtfopem2d4pwopif5u.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [0, 3], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 8], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = x2
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + (7*y3)), tmp3, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vq/cvqpprnukykv7fb6t2uveui44qrapemorby5j3fnnfeymwpqwe63.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_3 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 16, 7, grid=grid(16, 7), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
del buf1
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Lookahead(nn.Module):
def __init__(self, n_features, context):
super(Lookahead, self).__init__()
assert context > 0
self.context = context
self.n_features = n_features
self.pad = 0, self.context - 1
self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
=self.context, stride=1, groups=self.n_features, padding=0,
bias=None)
def forward(self, x):
x = x.transpose(1, 2)
x = F.pad(x, pad=self.pad, value=0)
x = self.conv(x)
x = x.transpose(1, 2).contiguous()
return x
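# Hedged usage sketch (added for exposition; sizes are illustrative): with
# context=4 the time axis is right-padded by context - 1 = 3 frames, so each
# output step t is a depthwise combination of input steps t..t+3 (a lookahead
# over future frames) while the sequence length is preserved.
def _lookahead_demo():
    layer = Lookahead(n_features=4, context=4)
    x = torch.rand(2, 10, 4)  # [batch, time, features]
    y = layer(x)
    return y.shape            # torch.Size([2, 10, 4])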
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'context': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = x2
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp2 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2 + 7 * y3), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
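# Added commentary (hedged): the two kernels above cover the pure data
# movement in Lookahead.forward. triton_poi_fused_constant_pad_nd_0 fuses the
# transpose(1, 2) with the right-side F.pad into the (4, 4, 7) buffer, and
# triton_poi_fused_clone_1 transposes the conv output back into a contiguous
# [batch, time, features] layout; the grouped Conv1d itself is dispatched
# through extern_kernels.convolution in call() below.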
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16,
7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf1, buf2, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf1
return buf2, primals_2, buf0
class LookaheadNew(nn.Module):
def __init__(self, n_features, context):
super(LookaheadNew, self).__init__()
assert context > 0
self.context = context
self.n_features = n_features
self.pad = 0, self.context - 1
self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
=self.context, stride=1, groups=self.n_features, padding=0,
bias=None)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| maxwellzh/CAT | Lookahead | false | 16,051 | [
"Apache-2.0"
] | 237 | b1a9c3f95e84d968593a05bf8b176b5f77b8055e | https://github.com/maxwellzh/CAT/tree/b1a9c3f95e84d968593a05bf8b176b5f77b8055e |
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tx/ctxqjm35hk2epafv7snppixsjvect7yi763ny23djhif5bukjobq.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, exp, log1p, mul_3, sub, modulator, cross_entropy, loss, weighted_loss, focal_loss, sum_2, truediv], Original ATen: [aten.mul, aten.exp, aten.log1p, aten.sub, aten.binary_cross_entropy_with_logits, aten.sum, aten.div]
# Source node to ATen node mapping:
# cross_entropy => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# exp => exp_1
# focal_loss => sum_1
# log1p => log1p_1
# loss => mul_5
# modulator => exp_2
# mul => mul_1
# mul_1 => mul_2
# mul_2 => mul_3
# mul_3 => mul_4
# sub => sub_3
# sum_2 => sum_2
# truediv => div
# weighted_loss => mul_6
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %arg1_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, -1.0), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log1p_1, 4), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_4), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_2, %sub_2), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, 4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = -4.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = -1.0
tmp6 = tmp3 * tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = libdevice.log1p(tmp7)
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp4 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = 1.0
tmp14 = tmp13 - tmp0
tmp15 = tmp14 * tmp3
tmp16 = 0.0
tmp17 = triton_helpers.minimum(tmp16, tmp3)
tmp18 = tl_math.abs(tmp3)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp15 - tmp22
tmp24 = tmp12 * tmp23
tmp25 = tmp24 * tmp9
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = tl.broadcast_to(tmp0, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp32 = tmp28 / tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, exp, log1p, mul_3, sub, modulator, cross_entropy, loss, weighted_loss, focal_loss, sum_2, truediv], Original ATen: [aten.mul, aten.exp, aten.log1p, aten.sub, aten.binary_cross_entropy_with_logits, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' else loss.sum(
) if reduction == 'sum' else loss
class FocalLoss(nn.Module):
"""
Original code is from https://github.com/richardaecn/class-balanced-loss/blob/master/src/cifar_main.py#L226-L266
"""
def __init__(self, alpha, gamma, normalize):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.normalize = normalize
def forward(self, preds, targets):
cross_entropy = F.binary_cross_entropy_with_logits(preds, targets,
reduction='none')
gamma = self.gamma
if gamma == 0.0:
modulator = 1.0
else:
modulator = th.exp(-gamma * targets * preds - gamma * th.log1p(
th.exp(-1.0 * preds)))
loss = modulator * cross_entropy
weighted_loss = self.alpha * loss
focal_loss = reduce_loss(weighted_loss, reduction='sum')
return focal_loss / targets.sum() if self.normalize else focal_loss
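# Added note (hedged, not in the original source): for binary targets y and
# p = sigmoid(preds), log1p(exp(-preds)) == -log(p), so the modulator above
# reduces to the familiar focal term (1 - p_t) ** gamma with
# p_t = p if y == 1 else 1 - p. A quick numerical check of that identity:
def _focal_modulator_check(gamma=2.0):
    preds = th.randn(8)
    targets = (th.rand(8) > 0.5).float()
    modulator = th.exp(-gamma * targets * preds - gamma * th.log1p(th.exp(
        -preds)))
    p = th.sigmoid(preds)
    p_t = th.where(targets > 0, p, 1 - p)
    return th.allclose(modulator, (1 - p_t) ** gamma, atol=1e-5)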
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4, 'gamma': 4, 'normalize': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = -4.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = -1.0
tmp6 = tmp3 * tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = libdevice.log1p(tmp7)
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp4 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = 1.0
tmp14 = tmp13 - tmp0
tmp15 = tmp14 * tmp3
tmp16 = 0.0
tmp17 = triton_helpers.minimum(tmp16, tmp3)
tmp18 = tl_math.abs(tmp3)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp15 - tmp22
tmp24 = tmp12 * tmp23
tmp25 = tmp24 * tmp9
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = tl.broadcast_to(tmp0, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp32 = tmp28 / tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None)
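# Added commentary (hedged): this persistent reduction fuses the whole
# FocalLoss.forward graph for the fixed 4x4x4x4 shape into one kernel: the
# modulator, the binary_cross_entropy_with_logits terms (alpha = gamma = 4
# appear as the baked-in constants 4.0 and -4.0), the 'sum' reduction, and the
# final normalization by the target sum, writing a single scalar to
# in_out_ptr0.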
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_exp_log1p_mul_sub_sum_0[
grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' else loss.sum(
) if reduction == 'sum' else loss
class FocalLossNew(nn.Module):
"""
Original code is from https://github.com/richardaecn/class-balanced-loss/blob/master/src/cifar_main.py#L226-L266
"""
def __init__(self, alpha, gamma, normalize):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.normalize = normalize
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| microsoft/vision-longformer | FocalLoss | false | 16,052 | [
"MIT"
] | 169 | c9ce386de3e633bb3c805368d118356fbd696487 | https://github.com/microsoft/vision-longformer/tree/c9ce386de3e633bb3c805368d118356fbd696487 |
CRFOutputLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dw/cdwmqu743nczlvrj7tkipbzih5kt2cf52yjyk66x6qqwoxzqkwcu.py
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_1 => add_1
# max_1 => max_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_max_0 = async_compile.triton('triton_poi_fused_add_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (2))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (2))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (3))
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + (3))
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tmp80 = tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + (x2), tmp38, xmask)
tl.store(out_ptr1 + (x2), tmp81, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ho/cho3kyyvvlfzvcgnxvqbk6afqxqu2eee4bblrr2c4njpbeom6354.py
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_3 => add_3
# max_2 => max_2
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_1), kwargs = {})
# %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_3, 1), kwargs = {})
triton_poi_fused_add_max_1 = async_compile.triton('triton_poi_fused_add_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (6 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (7 + (16*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (3))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tmp76 = tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + (x2), tmp34, xmask)
tl.store(out_ptr1 + (x2), tmp77, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i2/ci2s3onbganqcbrkqgul24aydnipaukm6yf5o2l7jzdixgeapo6p.py
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_5 => add_5
# max_3 => max_3
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_4, %unsqueeze_1), kwargs = {})
# %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1), kwargs = {})
triton_poi_fused_add_max_2 = async_compile.triton('triton_poi_fused_add_max_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (10 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (11 + (16*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (3))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tmp76 = tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + (x2), tmp34, xmask)
tl.store(out_ptr1 + (x2), tmp77, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cj/ccjggvguunan7bokqtiojzjz3sm3gwnc263uehtysgzmly66krb4.py
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
# Source node to ATen node mapping:
# add_7 => add_7
# max_4 => max_4
# tag_1 => gather
# tag_2 => gather_1
# tag_3 => gather_2
# v_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %select_3), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_5), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_7, 1, True), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_5, 1, %getitem_7), kwargs = {})
# %gather_1 : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_3, 1, %gather), kwargs = {})
# %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_1, 1, %gather_1), kwargs = {})
triton_poi_fused_add_gather_max_3 = async_compile.triton('triton_poi_fused_add_gather_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: '*i64', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (1))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + (1))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2))
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + (2))
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3))
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + (3))
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tmp77 = tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert(((0 <= tmp82) & (tmp82 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp82 < 4")
tmp84 = tl.load(in_ptr4 + (tmp82 + (4*x0)), xmask, eviction_policy='evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert(((0 <= tmp87) & (tmp87 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp87 < 4")
tmp89 = tl.load(in_ptr5 + (tmp87 + (4*x0)), xmask, eviction_policy='evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert(((0 <= tmp92) & (tmp92 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp92 < 4")
tmp94 = tl.load(in_ptr6 + (tmp92 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (4*x0), tmp78, xmask)
tl.store(out_ptr1 + (4*x0), tmp94, xmask)
tl.store(out_ptr2 + (4*x0), tmp89, xmask)
tl.store(out_ptr3 + (4*x0), tmp84, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, ), (1, ))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0), reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_0.run(buf0, arg1_1, arg3_1, arg4_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_1.run(buf1, buf0, arg1_1, arg4_1, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = buf1; del buf1 # reuse
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_2.run(buf3, buf0, arg1_1, arg4_1, buf5, buf6, 16, grid=grid(16), stream=stream0)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3) # alias
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0) # alias
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1) # alias
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
triton_poi_fused_add_gather_max_3.run(buf5, buf0, arg1_1, arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, grid=grid(4), stream=stream0)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return (buf11, )
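# Added commentary (hedged): buf1/buf2, buf3/buf4 and buf5/buf6 hold the
# Viterbi max-scores and backpointers for time steps 1..3; the last kernel
# picks the best final tag and gathers backwards through those backpointers,
# assembling buf11 as the [batch, seq_len] matrix of decoded tag indices.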
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes the negative log likelihood between features and tags:
essentially the difference between the individual sequence scores and
the sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
if len(tags.shape) != 2:
raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
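    # Added note (hedged): tags.unfold(1, 2, 1) above enumerates every
    # adjacent (tag_t, tag_t+1) pair along the sequence, so indexing
    # self.transitions with the two chunked index tensors yields one
    # transition score per step, which is summed into trans_score.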
def _partition_function(self, feats):
"""
Computes the partition function for the CRF using the forward algorithm,
i.e. the total score over all possible tag sequences for
the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
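        # Forward recurrence: a[b, j] = logsumexp_i(a[b, i] + T[i, j] + feat[b, j]);
        # `a` holds the log-score of every partial sequence ending in each tag.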
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
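        # Same recurrence as the partition function but with max in place of
        # logsumexp; `paths` keeps the argmax backpointers for decoding.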
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(
            self.__class__.__name__))
class CRFOutputLayer(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayer, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def forward(self, hidden):
feats = self.output_projection(hidden)
return self.crf(feats)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
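# Minimal usage sketch (shapes illustrative, not from the source):
#   layer = CRFOutputLayer(hidden_size=4, output_size=4)
#   best_tags = layer(torch.rand(2, 5, 4))            # [2, 5] decoded tag ids
#   nll = layer.loss(torch.rand(2, 5, 4), best_tags)  # scalar loss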
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + 2)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp81, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + 2)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + 2)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr2 + 3)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + 3)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert((0 <= tmp82) & (tmp82 < 4) | ~xmask,
'index out of bounds: 0 <= tmp82 < 4')
tmp84 = tl.load(in_ptr4 + (tmp82 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert((0 <= tmp87) & (tmp87 < 4) | ~xmask,
'index out of bounds: 0 <= tmp87 < 4')
tmp89 = tl.load(in_ptr5 + (tmp87 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tl.load(in_ptr6 + (tmp92 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp78, xmask)
tl.store(out_ptr1 + 4 * x0, tmp94, xmask)
tl.store(out_ptr2 + 4 * x0, tmp89, xmask)
tl.store(out_ptr3 + 4 * x0, tmp84, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0),
reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
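        # buf0 = hidden @ W^T for all batch*seq positions; the projection bias
        # (arg1_1) is added inside the fused add/max kernels below.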
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](buf0, arg1_1, arg3_1, arg4_1,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf1, buf0, arg1_1, arg4_1,
buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf3, buf0, arg1_1, arg4_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3)
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0)
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1)
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf5, buf0, arg1_1,
arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
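        # The final kernel takes the max over the last step and walks the stored
        # backpointers (buf6, buf4, buf2), writing the decoded tags straight
        # into the four column views (buf7..buf10) of buf11.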
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return buf11,
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
Essentially difference between individual sequence scores and
sum of all possible sequence scores (partition function)
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward algorithm,
        i.e. the total score over all possible tag sequences for
the given feature vector sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(
            self.__class__.__name__))
class CRFOutputLayerNew(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayerNew, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg4_1 = self.crf.transitions
arg3_1 = self.crf.start_transitions
arg5_1 = self.crf.stop_transitions
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
| markiewagner/torchnlp | CRFOutputLayer | false | 16,053 | [
"Apache-2.0"
] | 262 | 92f0a98c7c2b407508810834cbfd544214481695 | https://github.com/markiewagner/torchnlp/tree/92f0a98c7c2b407508810834cbfd544214481695 |
ToSEG | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s3/cs3vevvlveudb7oguup5ljgcyslvygs2cnrc5347em4iypopundn.py
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_2 => mul_2
# weight => mul_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r2/cr263a6gzji5hcuzutpzrubs2olns2ao2sa7aaaziqrb7stxhlqd.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
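        # The modulated 1x1 conv runs as a single grouped convolution over the
        # batch-folded input; buf5 then adds the output bias in place.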
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf5, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 4, 1, 1), (16, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
            return F.leaky_relu(input + bias.view(1, bias.shape[0],
                *rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
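# Note: `fused` (used inside the autograd Functions below) and `upfirdn2d_op`
# refer to the StyleGAN2 compiled CUDA extensions, presumably loaded elsewhere
# in the original repo; they are not defined in this file.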
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
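    # Reference CPU path: upsample by zero-insertion, pad, convolve with the
    # flipped FIR kernel, then stride-slice to downsample.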
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
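        # Equalized learning rate: weights are stored at unit variance and
        # rescaled by 1/sqrt(fan_in) at every forward pass.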
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2],
            ctx.in_size[3], 1)
        gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel,
            ctx.up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0,
            ctx.pad_x1, ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
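            # demod rescales each output filter to (approximately) unit norm,
            # StyleGAN2's replacement for instance normalization.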
        weight = weight.view(batch * self.out_channel, in_channel,
            self.kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(batch, self.out_channel, in_channel,
                self.kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
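            # Fold the batch into channels so one grouped conv applies a
            # different modulated filter to each sample.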
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1,
            pad=self.pad)
return out
class ToSEG(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, upsample=True,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, out_channel, 1, style_dim,
demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
triton_poi_fused_mul_2[grid(64)](primals_5, buf2, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_3[grid(256)](buf5, primals_6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16,
4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4,
4), (256, 16, 4, 1), 0)
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
            return F.leaky_relu(input + bias.view(1, bias.shape[0],
                *rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2],
            ctx.in_size[3], 1)
        gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel,
            ctx.up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0,
            ctx.pad_x1, ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
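# Worked size check (hypothetical numbers) for the formula in forward():
# in_h = 8, up_y = 2, pad_y0 = pad_y1 = 2, kernel_h = 4, down_y = 1 gives
#   out_h = (8 * 2 + 2 + 2 - 4) // 1 + 1 = 17
# i.e. zero-insertion upsampling, padding, FIR correlation, then decimation.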
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
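# Usage sketch (hypothetical shapes; needs the compiled upfirdn2d/fused ops):
# conv = ModulatedConv2d(in_channel=8, out_channel=16, kernel_size=3, style_dim=32)
# x, style = torch.randn(2, 8, 4, 4), torch.randn(2, 32)
# y = conv(x, style)  # -> (2, 16, 4, 4); the batch is handled as conv groups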
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
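# Pad arithmetic check for the default kernel [1, 3, 3, 1] and factor 2:
# p = 4 - 2 = 2, pad0 = (2 + 1) // 2 + 2 - 1 = 2, pad1 = 2 // 2 = 1.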
class ToSEGNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, upsample=True,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, out_channel, 1, style_dim,
demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input_0, input_1):
primals_6 = self.bias
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| mfredriksz/semanticGAN_code | ToSEG | false | 16,055 | [
"BSD-2-Clause",
"MIT"
] | 107 | c6e7b490086afd8a7593e2892452295555910494 | https://github.com/mfredriksz/semanticGAN_code/tree/c6e7b490086afd8a7593e2892452295555910494 |
MatrixVectorScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7j/c7jbjuldpm4lmq5j2yxriaa5pzxd66yavo6on2kne32aapwrry3v.py
# Topologically Sorted Source Nodes: [mul, attn], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attn => sum_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 1), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 1.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/il/cilhya4qf4nxxtiz5lozhkqi55asbjq743m2fnssk4qjcyqzbfzn.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ss/cssfnjfq2qzbgkkul2pahdlja6uh342bt6epehft6wvaxhwcx4t7.py
# Topologically Sorted Source Nodes: [mul_1, output], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_1 => mul_1
# output => sum_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %arg2_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, attn], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul_1, output], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf2, arg2_1, buf3, 256, grid=grid(256), stream=stream0)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class MatrixVectorScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=1)
def forward(self, q, k, v, mask=None):
"""
q: tensor of shape (n*b, d_k)
k: tensor of shape (n*b, l, d_k)
v: tensor of shape (n*b, l, d_v)
        returns: tensor of shape (n*b, d_v), tensor of shape (n*b, l)
"""
attn = (q.unsqueeze(1) * k).sum(2)
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = (attn.unsqueeze(2) * v).sum(1)
return output, attn
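# Usage sketch matching the docstring shapes (hypothetical sizes):
# attn_fn = MatrixVectorScaledDotProductAttention(temperature=8.0)
# q, k, v = torch.randn(6, 64), torch.randn(6, 10, 64), torch.randn(6, 10, 32)
# out, attn = attn_fn(q, k, v)  # out: (6, 32), attn: (6, 10)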
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 1.0
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tl_math.exp(tmp10)
tl.store(out_ptr0 + x3, tmp11, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
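# Kernels 1 and 2 above implement a numerically stable softmax over dim=1 at
# temperature 4 in two passes: exp((x - colmax) * 0.25), then division by the
# column sum; mathematically the result equals torch.softmax(scores / 4.0, dim=1).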
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_mul_sum_3[grid(256)](buf2, arg2_1, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg2_1
return buf3, buf2
class MatrixVectorScaledDotProductAttentionNew(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
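# Note: no dropout kernel appears in the compiled call(), so this forward
# matches the eager module in eval mode (nn.Dropout as identity); with
# attn_dropout > 0 and training enabled, the eager version would differ
# stochastically.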
| michiyasunaga/GreaseLM | MatrixVectorScaledDotProductAttention | false | 16,056 | [
"MIT"
] | 76 | 596aa5047841e3e97730f621a2e4576772733df2 | https://github.com/michiyasunaga/GreaseLM/tree/596aa5047841e3e97730f621a2e4576772733df2 |
FFModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d6/cd64wvrgscfuoaqir5r6psyuiuudcb7uj7csbtkw5mtg6dycegdi.py
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.silu]
# Source node to ATen node mapping:
# output_2 => mul_2, sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_silu_2 = async_compile.triton('triton_poi_fused_silu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_silu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_silu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/l2/cl2v6fq2jlrf4vur4lu45wajj6bbcbb73slmwzxr3gl5n4pe55pz.py
# Topologically Sorted Source Nodes: [mul, output_6], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_3
# output_6 => add_2
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.silu]
triton_poi_fused_silu_2.run(buf3, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [mul, output_6], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_3.run(buf6, primals_3, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FFModule(nn.Module):
"""Feed-forward module
default output dimension = idim
x0 -> LayerNorm -> FC -> Swish -> Dropout -> FC -> Dropout -> x1
x0 + res_factor * x1 -> output
"""
def __init__(self, idim: 'int', res_factor: 'float'=0.5, dropout:
'float'=0.0) ->None:
super().__init__()
assert res_factor > 0.0 and dropout >= 0.0
self._res_factor = res_factor
self.ln = nn.LayerNorm([idim])
self.fc0 = nn.Linear(idim, idim * 4)
self.swish = nn.SiLU()
self.dropout0 = nn.Dropout(dropout)
self.fc1 = nn.Linear(idim * 4, idim)
self.dropout1 = nn.Dropout(dropout)
def forward(self, x: 'torch.Tensor'):
output = self.ln(x)
output = self.fc0(output)
output = self.swish(output)
output = self.dropout0(output)
output = self.fc1(output)
output = self.dropout1(output)
output = x + self._res_factor * output
return output
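# Usage sketch (hypothetical sizes): the module preserves shape and computes
# x + res_factor * FFN(LayerNorm(x)) over the last dimension.
# ff = FFModule(idim=4)
# y = ff(torch.rand(2, 10, 4))  # y.shape == (2, 10, 4)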
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'idim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_silu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
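# The kernel above fuses three steps in one pass, in place over the matmul
# output buffer: out = x + 0.5 * (mm_result + fc1_bias), i.e. the fc1 bias
# add, the res_factor scaling, and the residual connection.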
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_poi_fused_silu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_mul_3[grid(256)](buf6, primals_3, primals_7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0
), primals_6, primals_4
class FFModuleNew(nn.Module):
"""Feed-forward module
default output dimension = idim
x0 -> LayerNorm -> FC -> Swish -> Dropout -> FC -> Dropout -> x1
x0 + res_factor * x1 -> output
"""
def __init__(self, idim: 'int', res_factor: 'float'=0.5, dropout:
'float'=0.0) ->None:
super().__init__()
assert res_factor > 0.0 and dropout >= 0.0
self._res_factor = res_factor
self.ln = nn.LayerNorm([idim])
self.fc0 = nn.Linear(idim, idim * 4)
self.swish = nn.SiLU()
self.dropout0 = nn.Dropout(dropout)
self.fc1 = nn.Linear(idim * 4, idim)
self.dropout1 = nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.ln.weight
primals_2 = self.ln.bias
primals_4 = self.fc0.weight
primals_5 = self.fc0.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
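# With the default dropout=0.0 both Dropout layers are identities, so this
# compiled forward should match FFModule.forward up to floating-point rounding.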
| maxwellzh/CAT | FFModule | false | 16,057 | [
"Apache-2.0"
] | 237 | b1a9c3f95e84d968593a05bf8b176b5f77b8055e | https://github.com/maxwellzh/CAT/tree/b1a9c3f95e84d968593a05bf8b176b5f77b8055e |
Acosh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sp/csph23e3uczekefsnrdpqng6iibnnyrwrrytkv6t6jxl7elsjhf5.py
# Topologically Sorted Source Nodes: [acosh], Original ATen: [aten.acosh]
# Source node to ATen node mapping:
# acosh => acosh
# Graph fragment:
# %acosh : [num_users=1] = call_function[target=torch.ops.aten.acosh.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_acosh_0 = async_compile.triton('triton_poi_fused_acosh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_acosh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_acosh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.acosh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [acosh], Original ATen: [aten.acosh]
stream0 = get_raw_stream(0)
triton_poi_fused_acosh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Acosh(nn.Module):
def forward(self, x):
return torch.acosh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
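# Domain note: acosh is real-valued only for x >= 1, so the torch.rand inputs
# above (values in [0, 1)) yield NaN elementwise. A domain-safe check:
# x = 1.0 + torch.rand(4, 4, 4, 4)
# assert torch.allclose(torch.cosh(torch.acosh(x)), x, atol=1e-5)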
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_acosh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.acosh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_acosh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AcoshNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Acosh | false | 16,058 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Cos | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hn/chncdvnujauxq6f6q7jnanla4d6y3auixelm26y42jq3nuckgdxy.py
# Topologically Sorted Source Nodes: [cos], Original ATen: [aten.cos]
# Source node to ATen node mapping:
# cos => cos
# Graph fragment:
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_cos_0 = async_compile.triton('triton_poi_fused_cos_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cos_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
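    # Elementwise cosine over the flattened 4x4x4x4 input (256 values);
    # each program instance covers XBLOCK lanes and xmask guards the tail.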
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl_math.cos(tmp0)
    tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cos], Original ATen: [aten.cos]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cos_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
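    # Time the compiled kernel on a random CUDA input with matching strides.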
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn


class Cos(nn.Module):

    def forward(self, x):
        return torch.cos(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
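    # Same cosine kernel as the generated source above, without Inductor metadata.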
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.cos(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
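    # Check the input's shape/strides, allocate the output, and launch the kernel.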
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class CosNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
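

# A minimal smoke test (an editorial addition, not part of the generated
# module): assumes a CUDA device is available.
if __name__ == "__main__":
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = CosNew()(x)
    assert torch.allclose(y, torch.cos(x))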
| mil-tokyo/webdnn | Cos | false | 16,059 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |