Dataset schema (per-column types and value ranges, as reported by the dataset viewer):

column                 dtype            min      max
entry_point            string length    1        65
original_triton_code   string length    4.5k     619k
python_code            string length    208      60.9k
triton_code            string length    1.15k    275k
repo_name              string length    7        115
module_name            string length    1        65
synthetic              bool             1 class
uuid                   int64            0        18.5k
licenses               sequence length  1        6
stars                  int64            0        19.8k
sha                    string length    40       40
repo_link              string length    72       180
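Each row pairs an eager PyTorch module (python_code) with the raw torch.compile/Inductor dump (original_triton_code) and a cleaned standalone translation (triton_code), plus repository provenance. Below is a minimal sketch of inspecting such a dataset with the Hugging Face datasets library; the hub id is hypothetical and stands in for the dataset's actual path.

# Hypothetical hub id -- substitute the real dataset path.
from datasets import load_dataset

ds = load_dataset("example-org/inductor-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["python_code"][:200])    # eager PyTorch reference module
print(row["triton_code"][:200])    # cleaned Inductor/Triton translation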
entry_point: ChebConv

original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/vg/cvgackp4gu33hqabol7ezxcsrxtvwoey7n7hsvngdnwtda7enfsr.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
#   x1 => full
# Graph fragment:
#   %full : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 16], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/vy/cvyf5zgh3bojmxotqf7rr5vgkxpnfdg7jme5aju2foipmsjqw6pb.py
# Topologically Sorted Source Nodes: [x0], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   x0 => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ow/cow6qxmvxv2md6xzffxet6eferts7774vmodub2f3pav2ybiwcwi.py
# Topologically Sorted Source Nodes: [mul, x2], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
#   mul => mul
#   x2 => sub
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_8, 2), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %view), kwargs = {})
triton_poi_fused_mul_sub_2 = async_compile.triton('triton_poi_fused_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4, 16],
    tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 - tmp3
    tl.store(out_ptr0 + (x1 + (16*y0)), tmp4, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/pf/cpfbzktc35b3whbm4qsrmwhfxm4eq5sxagsjudza36cnpsivlywx.py
# Topologically Sorted Source Nodes: [inputs_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   inputs_3 => cat_2
# Graph fragment:
#   %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %unsqueeze_3],), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 4],
    tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    y1 = (yindex // 16)
    x2 = xindex
    y0 = yindex % 16
    tmp0 = y1
    tmp1 = tl.full([1, 1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1, 1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.broadcast_to(y1, [XBLOCK, YBLOCK])
    tmp6 = tmp5 >= tmp1
    tmp7 = tl.full([1, 1], 2, tl.int64)
    tmp8 = tmp5 < tmp7
    tmp9 = tmp8 & tmp4
    tmp10 = tl.full([1, 1], 1, tl.int64)
    tmp11 = tmp5 < tmp10
    tmp12 = tmp11 & tmp9
    tmp13 = tl.load(in_ptr0 + (y0 + (16*x2)), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp14 = tmp5 >= tmp10
    tmp15 = tmp14 & tmp9
    tmp16 = tl.load(in_ptr1 + (x2 + (4*y0)), tmp15 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp17 = tl.where(tmp11, tmp13, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp5 >= tmp7
    tmp21 = tmp5 < tmp3
    tmp22 = tmp20 & tmp4
    tmp23 = tl.load(in_ptr2 + (x2 + (4*y0)), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp24 = 2.0
    tmp25 = tmp23 * tmp24
    tmp26 = tl.load(in_ptr0 + (y0 + (16*x2)), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp27 = tmp25 - tmp26
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp22, tmp27, tmp28)
    tmp30 = tl.where(tmp8, tmp19, tmp29)
    tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
    tmp32 = tl.where(tmp4, tmp30, tmp31)
    tmp33 = tmp0 >= tmp3
    tmp34 = tl.full([1, 1], 4, tl.int64)
    tmp35 = tmp0 < tmp34
    tmp36 = tl.load(in_ptr3 + (x2 + (4*y0)), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp37 = tmp36 * tmp24
    tmp38 = tl.load(in_ptr1 + (x2 + (4*y0)), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp39 = tmp37 - tmp38
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp33, tmp39, tmp40)
    tmp42 = tl.where(tmp4, tmp32, tmp41)
    tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp42, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/l7/cl7es77p5pbxyzlhffj6co3rh25cxlsdvva6jezkofwx6qptwnuk.py
# Topologically Sorted Source Nodes: [inputs_5, inputs_6], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
#   inputs_5 => clone_1
#   inputs_6 => view_2
# Graph fragment:
#   %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format})
#   %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_1, [16, 16]), kwargs = {})
triton_poi_fused_clone_view_4 = async_compile.triton('triton_poi_fused_clone_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((4*(x0 // 4)) + (16*(x1 % 4)) + (64*(x0 % 4)) + (x1 // 4)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/sz/csz5twnkhe2zz3eaafwmgidmezpgvs3favuuva73uud6rs7ouhj6.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   outputs => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.zeros]
        stream0 = get_raw_stream(0)
        triton_poi_fused_zeros_0.run(buf0, 64, grid=grid(64), stream=stream0)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x0], Original ATen: [aten.clone]
        triton_poi_fused_clone_1.run(primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
        del primals_3
        # Topologically Sorted Source Nodes: [x1], Original ATen: [aten._sparse_addmm]
        buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf1, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        buf3 = buf2
        del buf2
        # Topologically Sorted Source Nodes: [_sparse_mm_1], Original ATen: [aten._sparse_addmm]
        buf4 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), buf3, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, x2], Original ATen: [aten.mul, aten.sub]
        triton_poi_fused_mul_sub_2.run(buf5, buf1, buf6, 4, 16, grid=grid(4, 16), stream=stream0)
        # Topologically Sorted Source Nodes: [_sparse_mm_2], Original ATen: [aten._sparse_addmm]
        buf7 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf6, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        del buf0
        del buf6
        del primals_2
        buf8 = buf7
        del buf7
        buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [inputs_3], Original ATen: [aten.cat]
        triton_poi_fused_cat_3.run(buf1, buf3, buf5, buf8, buf9, 64, 4, grid=grid(64, 4), stream=stream0)
        del buf1
        del buf3
        del buf5
        buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [inputs_5, inputs_6], Original ATen: [aten.clone, aten.view]
        triton_poi_fused_clone_view_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
        del buf9
        buf11 = buf8; del buf8  # reuse
        # Topologically Sorted Source Nodes: [inputs_7], Original ATen: [aten.mm]
        extern_kernels.mm(buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), out=buf11)
        del primals_1
        buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11  # reuse
        # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
        triton_poi_fused_add_5.run(buf12, primals_4, 64, grid=grid(64), stream=stream0)
        del primals_4
    return (buf12, reinterpret_tensor(buf10, (16, 16), (1, 16), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
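For orientation: in call() above, the three aten._sparse_addmm calls compute the Laplacian products of the Chebyshev recurrence, triton_poi_fused_mul_sub_2 fuses the 2*(L @ x1) - x0 update, and triton_poi_fused_cat_3 stacks the basis terms into buf9. A hedged eager-mode restatement of the buffer sequence (my reading of the graph; shapes follow the asserts in call()):

# Hedged eager restatement of call(): buf1 ~ x0, buf3 ~ x1, and buf5/buf8
# hold the L @ x products that the mul/sub logic turns into x2 and x3.
import torch

B, V, Fin = 4, 4, 4
inputs = torch.rand(B, V, Fin)
L = torch.rand(V, V)                               # stand-in for primals_2
x0 = inputs.permute(1, 2, 0).reshape(V, Fin * B)   # buf1
x1 = L @ x0                                        # buf3
x2 = 2 * (L @ x1) - x0                             # buf5 -> mul_sub_2 -> buf6
x3 = 2 * (L @ x2) - x1                             # buf8, folded into cat_3
basis = torch.stack([x0, x1, x2, x3])              # buf9
print(basis.shape)                                 # torch.Size([4, 4, 16])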
python_code:

import math
import torch


def cheb_conv(laplacian, inputs, weight):
    """Chebyshev convolution.

    Args:
        laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding
            to the current sampling of the sphere.
        inputs (:obj:`torch.Tensor`): The current input data being forwarded.
        weight (:obj:`torch.Tensor`): The weights of the current layer.

    Returns:
        :obj:`torch.Tensor`: Inputs after applying Chebyshev convolution.
    """
    B, V, Fin = inputs.shape
    K, Fin, Fout = weight.shape
    x0 = inputs.permute(1, 2, 0).contiguous()
    x0 = x0.view([V, Fin * B])
    inputs = x0.unsqueeze(0)
    if K > 0:
        x1 = torch.sparse.mm(laplacian, x0)
        inputs = torch.cat((inputs, x1.unsqueeze(0)), 0)
        for _ in range(1, K - 1):
            x2 = 2 * torch.sparse.mm(laplacian, x1) - x0
            inputs = torch.cat((inputs, x2.unsqueeze(0)), 0)
            x0, x1 = x1, x2
    inputs = inputs.view([K, V, Fin, B])
    inputs = inputs.permute(3, 1, 2, 0).contiguous()
    inputs = inputs.view([B * V, Fin * K])
    weight = weight.view(Fin * K, Fout)
    inputs = inputs.matmul(weight)
    inputs = inputs.view([B, V, Fout])
    return inputs


class ChebConv(torch.nn.Module):
    """Graph convolutional layer.
    """

    def __init__(self, in_channels, out_channels, kernel_size, bias=True,
                 conv=cheb_conv):
        """Initialize the Chebyshev layer.

        Args:
            in_channels (int): Number of channels/features in the input graph.
            out_channels (int): Number of channels/features in the output graph.
            kernel_size (int): Number of trainable parameters per filter, which
                is also the size of the convolutional kernel. The order of the
                Chebyshev polynomials is kernel_size - 1.
            bias (bool): Whether to add a bias term.
            conv (callable): Function which will perform the actual convolution.
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self._conv = conv
        shape = kernel_size, in_channels, out_channels
        self.weight = torch.nn.Parameter(torch.Tensor(*shape))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.kaiming_initialization()

    def kaiming_initialization(self):
        """Initialize weights and bias.
        """
        std = math.sqrt(2 / (self.in_channels * self.kernel_size))
        self.weight.data.normal_(0, std)
        if self.bias is not None:
            self.bias.data.fill_(0.01)

    def forward(self, laplacian, inputs):
        """Forward graph convolution.

        Args:
            laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding
                to the current sampling of the sphere.
            inputs (:obj:`torch.Tensor`): The current input data being forwarded.

        Returns:
            :obj:`torch.Tensor`: The convoluted inputs.
        """
        outputs = self._conv(laplacian, inputs, self.weight)
        if self.bias is not None:
            outputs += self.bias
        return outputs


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
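A minimal smoke test for the reference module above, assuming a placeholder identity Laplacian (a real call would pass a rescaled sparse graph Laplacian); channel and kernel sizes follow get_init_inputs():

# Smoke test: ChebConv forward with a sparse identity Laplacian.
import torch

V = 4                                   # number of vertices
conv = ChebConv(in_channels=4, out_channels=4, kernel_size=4)
laplacian = torch.eye(V).to_sparse()    # placeholder sparse Laplacian
inputs = torch.rand(2, V, 4)            # (batch, vertices, features)
out = conv(laplacian, inputs)
print(out.shape)                        # torch.Size([2, 4, 4])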
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x1 + 16 * y0), xmask & ymask, eviction_policy='evict_last')
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 - tmp3
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    y1 = yindex // 16
    x2 = xindex
    y0 = yindex % 16
    tmp0 = y1
    tl.full([1, 1], 0, tl.int64)
    tmp3 = tl.full([1, 1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.broadcast_to(y1, [XBLOCK, YBLOCK])
    tmp7 = tl.full([1, 1], 2, tl.int64)
    tmp8 = tmp5 < tmp7
    tmp9 = tmp8 & tmp4
    tmp10 = tl.full([1, 1], 1, tl.int64)
    tmp11 = tmp5 < tmp10
    tmp12 = tmp11 & tmp9
    tmp13 = tl.load(in_ptr0 + (y0 + 16 * x2), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp14 = tmp5 >= tmp10
    tmp15 = tmp14 & tmp9
    tmp16 = tl.load(in_ptr1 + (x2 + 4 * y0), tmp15 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp17 = tl.where(tmp11, tmp13, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp5 >= tmp7
    tmp22 = tmp20 & tmp4
    tmp23 = tl.load(in_ptr2 + (x2 + 4 * y0), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp24 = 2.0
    tmp25 = tmp23 * tmp24
    tmp26 = tl.load(in_ptr0 + (y0 + 16 * x2), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp27 = tmp25 - tmp26
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp22, tmp27, tmp28)
    tmp30 = tl.where(tmp8, tmp19, tmp29)
    tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
    tmp32 = tl.where(tmp4, tmp30, tmp31)
    tmp33 = tmp0 >= tmp3
    tl.full([1, 1], 4, tl.int64)
    tmp36 = tl.load(in_ptr3 + (x2 + 4 * y0), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp37 = tmp36 * tmp24
    tmp38 = tl.load(in_ptr1 + (x2 + 4 * y0), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0)
    tmp39 = tmp37 - tmp38
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp33, tmp39, tmp40)
    tmp42 = tl.where(tmp4, tmp32, tmp41)
    tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp42, xmask & ymask)


@triton.jit
def triton_poi_fused_clone_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * (x0 // 4) + 16 * (x1 % 4) + 64 * (x0 % 4) + x1 // 4), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_zeros_0[grid(64)](buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(16, 4)](primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf1, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        buf3 = buf2
        del buf2
        buf4 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), buf3, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        triton_poi_fused_mul_sub_2[grid(4, 16)](buf5, buf1, buf6, 4, 16, XBLOCK=8, YBLOCK=4, num_warps=1, num_stages=1)
        buf7 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf6, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0)
        del buf0
        del buf6
        del primals_2
        buf8 = buf7
        del buf7
        buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_cat_3[grid(64, 4)](buf1, buf3, buf5, buf8, buf9, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del buf1
        del buf3
        del buf5
        buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
        triton_poi_fused_clone_view_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf9
        buf11 = buf8
        del buf8
        extern_kernels.mm(buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), out=buf11)
        del primals_1
        buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
        del buf11
        triton_poi_fused_add_5[grid(64)](buf12, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
    return buf12, reinterpret_tensor(buf10, (16, 16), (1, 16), 0)


def cheb_conv(laplacian, inputs, weight):
    """Chebyshev convolution.

    Args:
        laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding
            to the current sampling of the sphere.
        inputs (:obj:`torch.Tensor`): The current input data being forwarded.
        weight (:obj:`torch.Tensor`): The weights of the current layer.

    Returns:
        :obj:`torch.Tensor`: Inputs after applying Chebyshev convolution.
    """
    B, V, Fin = inputs.shape
    K, Fin, Fout = weight.shape
    x0 = inputs.permute(1, 2, 0).contiguous()
    x0 = x0.view([V, Fin * B])
    inputs = x0.unsqueeze(0)
    if K > 0:
        x1 = torch.sparse.mm(laplacian, x0)
        inputs = torch.cat((inputs, x1.unsqueeze(0)), 0)
        for _ in range(1, K - 1):
            x2 = 2 * torch.sparse.mm(laplacian, x1) - x0
            inputs = torch.cat((inputs, x2.unsqueeze(0)), 0)
            x0, x1 = x1, x2
    inputs = inputs.view([K, V, Fin, B])
    inputs = inputs.permute(3, 1, 2, 0).contiguous()
    inputs = inputs.view([B * V, Fin * K])
    weight = weight.view(Fin * K, Fout)
    inputs = inputs.matmul(weight)
    inputs = inputs.view([B, V, Fout])
    return inputs


class ChebConvNew(torch.nn.Module):
    """Graph convolutional layer.
    """

    def __init__(self, in_channels, out_channels, kernel_size, bias=True,
                 conv=cheb_conv):
        """Initialize the Chebyshev layer.

        Args:
            in_channels (int): Number of channels/features in the input graph.
            out_channels (int): Number of channels/features in the output graph.
            kernel_size (int): Number of trainable parameters per filter, which
                is also the size of the convolutional kernel. The order of the
                Chebyshev polynomials is kernel_size - 1.
            bias (bool): Whether to add a bias term.
            conv (callable): Function which will perform the actual convolution.
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self._conv = conv
        shape = kernel_size, in_channels, out_channels
        self.weight = torch.nn.Parameter(torch.Tensor(*shape))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.kaiming_initialization()

    def kaiming_initialization(self):
        """Initialize weights and bias.
        """
        std = math.sqrt(2 / (self.in_channels * self.kernel_size))
        self.weight.data.normal_(0, std)
        if self.bias is not None:
            self.bias.data.fill_(0.01)

    def forward(self, input_0, input_1):
        primals_1 = self.weight
        primals_4 = self.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
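A hedged run sketch for the compiled module, mirroring the rand_strided harness that benchmark_compiled_module uses in the raw dump: it needs a CUDA device (call() pins buffers to cuda:0) and a PyTorch build matching the backend hash recorded above. Note the argument order of ChebConvNew.forward: the laplacian first, then the batch.

# Run the Inductor-compiled module with the same shapes as get_inputs().
import torch

if torch.cuda.is_available():
    model = ChebConvNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
    laplacian = torch.rand(4, 4, device='cuda')   # dense stand-in, as in the harness
    inputs = torch.rand(4, 4, 4, device='cuda')   # (B, V, Fin)
    out = model(laplacian, inputs)
    print(out.shape)                              # expected: torch.Size([4, 4, 4])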
repo_name: phil-hawkins/deepsphere-pytorch
module_name: ChebConv
synthetic: false
uuid: 16261
licenses: ["MIT"]
stars: 99
sha: f23c531445b3ddf234c7e98cdadb010163051e6d
repo_link: https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
entry_point: RUM

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.zeros] # Source node to ATen node mapping: # hidden => full # Graph fragment: # %full : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xb/cxbosxusiwxwrwo5dkh6drqbspkbcev33jm22liqqv6psw7uqyuv.py # 
Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %full], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vl/cvlwz5anmpak5wji63alggacttzamcil7sqfwzefb7u64c27o3xi.py # Topologically Sorted Source Nodes: [x_emb, u_2, x_emb_1, u_5, x_emb_2, u_8], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min] # Source node to ATen node mapping: # u_2 => clamp_min, pow_1, pow_2, sum_1 # u_5 => clamp_min_3, pow_8, pow_9, sum_6 # u_8 => clamp_min_6, pow_15, pow_16, sum_11 # x_emb => add_2 # x_emb_1 => add_9 # x_emb_2 => add_16 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_2, %primals_7), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-12), kwargs = {}) # %add_9 : [num_users=3] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %primals_7), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_9, 2), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_8, [1], True), kwargs = {}) # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_6, 0.5), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_9, 1e-12), kwargs = {}) # %add_16 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_8, %primals_7), kwargs = {}) # %pow_15 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_16, 2), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_15, [1], True), kwargs = {}) # %pow_16 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_11, 0.5), kwargs = {}) # %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_16, 1e-12), kwargs = {}) triton_poi_fused_add_clamp_min_linalg_vector_norm_2 = async_compile.triton('triton_poi_fused_add_clamp_min_linalg_vector_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_linalg_vector_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp54 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp26 + tmp2 tmp28 = tmp27 * tmp27 tmp30 = tmp29 + tmp7 tmp31 = tmp30 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp33 + tmp13 tmp35 = tmp34 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp37 + tmp19 tmp39 = tmp38 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = libdevice.sqrt(tmp40) tmp42 = triton_helpers.maximum(tmp41, tmp24) tmp44 = tmp43 + tmp2 tmp45 = tmp44 * tmp44 tmp47 = tmp46 + tmp7 tmp48 = tmp47 * tmp47 tmp49 = tmp45 + tmp48 tmp51 = tmp50 + tmp13 tmp52 = tmp51 * tmp51 tmp53 = tmp49 + tmp52 tmp55 = tmp54 + tmp19 tmp56 = tmp55 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp24) tl.store(out_ptr0 + (x0), tmp25, xmask) tl.store(out_ptr1 + (x0), tmp42, xmask) tl.store(out_ptr2 + (x0), tmp59, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ub/cub2tc5rnfgcnyzktegtqordxyxktrvw3sda4h2ddsiqtz4qa5jo.py # Topologically Sorted Source Nodes: [x_emb, u_2], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # u_2 => div # x_emb => add_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_2, %primals_7), kwargs = {}) # %div : [num_users=6] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %expand), kwargs = {}) triton_poi_fused_add_div_3 = async_compile.triton('triton_poi_fused_add_div_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fs/cfsko2dfeqwkh4q4lyejombxmrl2t7ureeppisbfaxfaaur262fa.py # Topologically Sorted Source Nodes: [r, normalize_1, mul, sum_1, mul_1, sum_2], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # normalize_1 => clamp_min_1, div_1, pow_3, pow_4, sum_2 # r => add # sum_1 => sum_3 # sum_2 => sum_4 # Graph fragment: # %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %primals_3), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %expand_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %add), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4 = async_compile.triton('triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp3 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp8 / tmp25 tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp14 / tmp25 tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp20 / tmp25 tmp39 = tmp37 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = tmp26 * tmp3 tmp42 = tmp29 * tmp8 tmp43 = tmp41 + tmp42 tmp44 = tmp33 * tmp14 tmp45 = tmp43 + tmp44 tmp46 = tmp37 * tmp20 tmp47 = tmp45 + tmp46 tl.store(in_out_ptr0 + (x0), tmp40, xmask) tl.store(out_ptr0 + (x0), tmp47, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6d/c6dv7ob2xepxibkp5itjcuaziictgquanmkgoro536g4a2vni2jh.py # Topologically Sorted Source Nodes: [r, mul_2, sub_1], Original ATen: [aten.add, aten.mul, aten.sub] # Source node to ATen node mapping: # mul_2 => mul_2 # r => add # sub_1 => sub_1 # Graph fragment: # %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %primals_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %div), kwargs = {}) # %sub_1 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_2), kwargs = {}) triton_poi_fused_add_mul_sub_5 = async_compile.triton('triton_poi_fused_add_mul_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 - tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dt/cdtuwjncvrjsebxoa4jvuenglwk74vf2sg6exqg7b65ns37esu2j.py # Topologically Sorted Source Nodes: [v], Original ATen: [aten.div] # Source node to ATen node mapping: # v => div_2 # Graph fragment: # %div_2 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %expand_2), kwargs = {}) triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bk/cbkkyt4ysniwfp3c7326ovdomh4zqe3yhfkp2ezibnigr7ji67ey.py # Topologically Sorted Source Nodes: [tmp], Original ATen: [aten.cat] # Source node to ATen node mapping: # tmp => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_4, %view_5], 1), kwargs = {}) triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 2 x0 = xindex % 4 x2 = (xindex // 8) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/32/c32uq45qq7fbtbh7hbiyebmyrcytvknjcvlzx2ommgcnausmqk3s.py # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %neg, %sqrt, %view], 1), kwargs = {}) triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp10 * tmp10 tmp12 = 1.0 tmp13 = tmp12 - tmp11 tmp14 = libdevice.sqrt(tmp13) tmp15 = -tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 3, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr0 + (x1), tmp21 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 * tmp22 tmp24 = tmp12 - tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 4, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tl.load(in_ptr0 + (x1), tmp28 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.where(tmp21, tmp27, tmp31) tmp33 = tl.where(tmp9, tmp17, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + (x2), tmp34, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sd/csd6lv32wy2a437s4u5jbqba4mdawa4qdpnaz6ktecyk3v3wzucf.py # Topologically Sorted Source Nodes: [u, x_emb, u_1, hidden_new, add_5, c, mul_3, sub_3, mul_4, new_h, x_emb_1, u_5], Original ATen: 
[aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.div] # Source node to ATen node mapping: # add_5 => add_5 # c => relu # hidden_new => add_4 # mul_3 => mul_3 # mul_4 => mul_4 # new_h => add_6 # sub_3 => sub_3 # u => add_1 # u_1 => sigmoid # u_5 => div_3 # x_emb => add_2 # x_emb_1 => add_9 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_5), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_2, %primals_7), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%full, %view_30), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %add_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_5,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %full), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %relu), kwargs = {}) # %add_6 : [num_users=6] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %add_9 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %primals_7), kwargs = {}) # %div_3 : [num_users=6] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_9, %expand_17), kwargs = {}) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9 = async_compile.triton('triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x2), xmask) tmp8 = tl.load(in_ptr2 + (x2), xmask) tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x2), xmask) tmp15 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + (x2), xmask) tmp25 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last') tmp1 = -tmp0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp6 + tmp5 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp6 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kg/ckgygdmwt3xdx3lejccgqkvciofehblhr2255a2cixfclmce2xew.py # Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_4 => cat_4 # Graph fragment: # %cat_4 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_1, %add_6], 1), kwargs = {}) triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 
tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qj/cqjzsr4bwkt5n5du242lc6pf3ik4vdea44uwxzec7v6gacgan6gn.py # Topologically Sorted Source Nodes: [u_3, x_emb_1, u_4, hidden_new_1, add_12, c_1, mul_8, sub_7, mul_9, new_h_1, x_emb_2, u_8], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.div] # Source node to ATen node mapping: # add_12 => add_12 # c_1 => relu_1 # hidden_new_1 => add_11 # mul_8 => mul_8 # mul_9 => mul_9 # new_h_1 => add_13 # sub_7 => sub_7 # u_3 => add_8 # u_4 => sigmoid_1 # u_8 => div_6 # x_emb_1 => add_9 # x_emb_2 => add_16 # Graph fragment: # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_4, %primals_5), kwargs = {}) # %add_9 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %primals_7), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_8,), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %view_61), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %add_9), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_12,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %add_6), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %relu_1), kwargs = {}) # %add_13 : [num_users=6] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_9), kwargs = {}) # %add_16 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_8, %primals_7), kwargs = {}) # %div_6 : [num_users=6] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_16, %expand_34), kwargs = {}) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11 = async_compile.triton('triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x2), xmask) tmp8 = tl.load(in_ptr3 + (x2), xmask) tmp9 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x2), xmask) tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr7 + (x2), xmask) tmp25 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m6/cm6kwumswycqvynoccf6e2jeft2h5sxnaunbi4llwc66a5pbnwhm.py # Topologically Sorted Source Nodes: [cat_8], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_8 => cat_8 # Graph fragment: # %cat_8 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_2, %add_13], 1), kwargs = {}) triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4q/c4qv6nucfhstuc3vn2oeun7sutq6v4tcaatwnmjld3fffkbmtsvg.py # Topologically Sorted Source Nodes: [x_emb_3, u_11], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min] # Source node to ATen node mapping: # u_11 => clamp_min_9, pow_22, pow_23, sum_16 # x_emb_3 => add_23 # Graph fragment: # %add_23 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_11, %primals_7), kwargs = {}) # %pow_22 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_23, 2), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_22, [1], True), kwargs = {}) # %pow_23 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_16, 0.5), kwargs = {}) # %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_23, 1e-12), kwargs = {}) triton_poi_fused_add_clamp_min_linalg_vector_norm_13 = async_compile.triton('triton_poi_fused_add_clamp_min_linalg_vector_norm_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_linalg_vector_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tl.store(out_ptr0 + (x0), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sy/csy2cbhsijngnmsaagfunjyoysgy7ye6fm6kdjs7ys4h7qvi7aju.py # Topologically Sorted Source Nodes: [cat_12], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_12 => cat_12 # Graph fragment: # %cat_12 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_3, %add_20], 1), kwargs = {}) triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (12 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/sq/csqnrwv5si7q2rppa6qho3zylmtdwgkh4tljpristd6hyetygrys.py # Topologically Sorted Source Nodes: [u_9, x_emb_3, u_10, hidden_new_3, add_26, c_3, mul_18, sub_15, mul_19, new_h_3], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_26 => add_26 # c_3 => relu_3 # hidden_new_3 => add_25 # mul_18 => mul_18 # mul_19 => mul_19 # new_h_3 => add_27 # sub_15 => sub_15 # u_10 => sigmoid_3 # u_9 => add_22 # x_emb_3 => add_23 # Graph fragment: # %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_10, %primals_5), kwargs = {}) # %add_23 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_11, %primals_7), kwargs = {}) # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_22,), kwargs = {}) # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %view_123), kwargs = {}) # %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %add_23), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_26,), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %add_20), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_3), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %relu_3), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_18, %mul_19), kwargs = {}) triton_poi_fused_add_mul_relu_rsub_sigmoid_15 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x2), xmask) tmp8 = tl.load(in_ptr3 + (x2), xmask) tmp9 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x2), xmask) tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tl.store(in_out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3j/c3jscoh7c2443vsxz67t65jqgzedijdwxk44qfu5ybsp22org3ns.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat_16 # Graph fragment: # %cat_16 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_6, %add_13, %add_20, %add_27], 1), kwargs = {}) triton_poi_fused_stack_16 = async_compile.triton('triton_poi_fused_stack_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) 
tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.zeros] stream0 = get_raw_stream(0) triton_poi_fused_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, buf1, 32, grid=grid(32), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf1, primals_2, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(buf1, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_6, out=buf4) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_6, out=buf26) buf48 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_6, out=buf48) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf27 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf49 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [x_emb, u_2, x_emb_1, u_5, x_emb_2, u_8], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min] triton_poi_fused_add_clamp_min_linalg_vector_norm_2.run(buf4, primals_7, buf26, buf48, buf5, buf27, buf49, 4, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_emb, u_2], Original ATen: [aten.add, aten.div] triton_poi_fused_add_div_3.run(buf4, primals_7, buf5, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf5; del buf5 # reuse buf8 = reinterpret_tensor(buf7, (4, ), (1, ), 0); del buf7 # reuse buf9 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [r, normalize_1, mul, sum_1, mul_1, sum_2], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum] 
triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4.run(buf8, buf2, primals_3, buf6, buf9, 4, grid=grid(4), stream=stream0) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [r, mul_2, sub_1], Original ATen: [aten.add, aten.mul, aten.sub] triton_poi_fused_add_mul_sub_5.run(buf2, primals_3, buf9, buf6, buf10, 16, grid=grid(16), stream=stream0) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.div] triton_poi_fused_div_6.run(buf10, buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 1), 0), buf12, out=buf13) buf14 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf11, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0), buf14, out=buf15) buf16 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf6, buf11, buf16, 32, grid=grid(32), stream=stream0) buf17 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp, matmul_7], Original ATen: [aten.cat, aten.bmm] extern_kernels.bmm(buf16, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_8.run(buf8, buf18, 16, grid=grid(16), stream=stream0) buf19 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf18, (4, 2, 2), (4, 2, 1), 0), buf17, out=buf19) buf20 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf16, (4, 4, 2), (8, 1, 4), 0), buf19, out=buf20) buf21 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0); del buf13 # reuse buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [u, x_emb, u_1, hidden_new, add_5, c, mul_3, sub_3, mul_4, new_h, x_emb_1, u_5], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.div] triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9.run(buf21, buf15, buf20, buf4, primals_7, buf3, primals_5, buf26, buf27, buf22, buf28, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat] triton_poi_fused_cat_10.run(primals_1, buf22, buf23, 32, grid=grid(32), stream=stream0) buf24 = reinterpret_tensor(buf20, (4, 4), (4, 1), 0); 
del buf20 # reuse # Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.mm] extern_kernels.mm(buf23, primals_2, out=buf24) buf25 = reinterpret_tensor(buf15, (4, 4), (4, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.mm] extern_kernels.mm(buf23, primals_4, out=buf25) buf29 = buf27; del buf27 # reuse buf30 = reinterpret_tensor(buf29, (4, ), (1, ), 0); del buf29 # reuse buf31 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [r_1, normalize_4, mul_5, sum_3, mul_6, sum_4], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum] triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4.run(buf30, buf24, primals_3, buf28, buf31, 4, grid=grid(4), stream=stream0) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [r_1, mul_7, sub_5], Original ATen: [aten.add, aten.mul, aten.sub] triton_poi_fused_add_mul_sub_5.run(buf24, primals_3, buf31, buf28, buf32, 16, grid=grid(16), stream=stream0) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.div] triton_poi_fused_div_6.run(buf32, buf33, 16, grid=grid(16), stream=stream0) buf34 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp_1], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf28, buf33, buf34, 32, grid=grid(32), stream=stream0) buf35 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf28, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf35) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf28, (4, 4, 1), (4, 1, 1), 0), buf35, out=buf36) buf37 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf33, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf37) buf38 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 1), 0), buf37, out=buf38) buf39 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm] extern_kernels.bmm(buf34, reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf39) buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_6], Original ATen: [aten.cat] triton_poi_fused_cat_8.run(buf30, buf40, 16, grid=grid(16), stream=stream0) buf41 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf40, (4, 2, 2), (4, 2, 1), 0), buf39, out=buf41) buf42 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf34, (4, 4, 2), (8, 1, 4), 0), buf41, out=buf42) buf43 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0); del buf36 # reuse buf44 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf50 = empty_strided_cuda((4, 4), (4, 
1), torch.float32) # Topologically Sorted Source Nodes: [u_3, x_emb_1, u_4, hidden_new_1, add_12, c_1, mul_8, sub_7, mul_9, new_h_1, x_emb_2, u_8], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.div] triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11.run(buf43, buf22, buf38, buf42, buf26, primals_7, buf25, primals_5, buf48, buf49, buf44, buf50, 16, grid=grid(16), stream=stream0) buf45 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_8], Original ATen: [aten.cat] triton_poi_fused_cat_12.run(primals_1, buf44, buf45, 32, grid=grid(32), stream=stream0) buf46 = reinterpret_tensor(buf42, (4, 4), (4, 1), 0); del buf42 # reuse # Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.mm] extern_kernels.mm(buf45, primals_2, out=buf46) buf47 = reinterpret_tensor(buf38, (4, 4), (4, 1), 0); del buf38 # reuse # Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.mm] extern_kernels.mm(buf45, primals_4, out=buf47) buf51 = buf49; del buf49 # reuse buf52 = reinterpret_tensor(buf51, (4, ), (1, ), 0); del buf51 # reuse buf53 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [r_2, normalize_7, mul_10, sum_5, mul_11, sum_6], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum] triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4.run(buf52, buf46, primals_3, buf50, buf53, 4, grid=grid(4), stream=stream0) buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [r_2, mul_12, sub_9], Original ATen: [aten.add, aten.mul, aten.sub] triton_poi_fused_add_mul_sub_5.run(buf46, primals_3, buf53, buf50, buf54, 16, grid=grid(16), stream=stream0) buf55 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v_2], Original ATen: [aten.div] triton_poi_fused_div_6.run(buf54, buf55, 16, grid=grid(16), stream=stream0) buf56 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp_2], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf50, buf55, buf56, 32, grid=grid(32), stream=stream0) buf57 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf50, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf57) buf58 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf50, (4, 4, 1), (4, 1, 1), 0), buf57, out=buf58) buf59 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf55, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf59) buf60 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf55, (4, 4, 1), (4, 1, 1), 0), buf59, out=buf60) buf61 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm] extern_kernels.bmm(buf56, reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf61) buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_10], Original ATen: [aten.cat] 
triton_poi_fused_cat_8.run(buf52, buf62, 16, grid=grid(16), stream=stream0) buf63 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf62, (4, 2, 2), (4, 2, 1), 0), buf61, out=buf63) buf64 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf56, (4, 4, 2), (8, 1, 4), 0), buf63, out=buf64) buf70 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), primals_6, out=buf70) del primals_6 buf71 = reinterpret_tensor(buf52, (4, 1), (1, 4), 0); del buf52 # reuse # Topologically Sorted Source Nodes: [x_emb_3, u_11], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min] triton_poi_fused_add_clamp_min_linalg_vector_norm_13.run(buf70, primals_7, buf71, 4, grid=grid(4), stream=stream0) buf65 = reinterpret_tensor(buf58, (4, 4), (4, 1), 0); del buf58 # reuse buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf72 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [u_6, x_emb_2, u_7, hidden_new_2, add_19, c_2, mul_13, sub_11, mul_14, new_h_2, x_emb_3, u_11], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.div] triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11.run(buf65, buf44, buf60, buf64, buf48, primals_7, buf47, primals_5, buf70, buf71, buf66, buf72, 16, grid=grid(16), stream=stream0) buf67 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_12], Original ATen: [aten.cat] triton_poi_fused_cat_14.run(primals_1, buf66, buf67, 32, grid=grid(32), stream=stream0) buf68 = reinterpret_tensor(buf64, (4, 4), (4, 1), 0); del buf64 # reuse # Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.mm] extern_kernels.mm(buf67, primals_2, out=buf68) buf69 = reinterpret_tensor(buf60, (4, 4), (4, 1), 0); del buf60 # reuse # Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.mm] extern_kernels.mm(buf67, primals_4, out=buf69) buf73 = buf71; del buf71 # reuse buf74 = reinterpret_tensor(buf73, (4, ), (1, ), 0); del buf73 # reuse buf75 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [r_3, normalize_10, mul_15, sum_7, mul_16, sum_8], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum] triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4.run(buf74, buf68, primals_3, buf72, buf75, 4, grid=grid(4), stream=stream0) buf76 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [r_3, mul_17, sub_13], Original ATen: [aten.add, aten.mul, aten.sub] triton_poi_fused_add_mul_sub_5.run(buf68, primals_3, buf75, buf72, buf76, 16, grid=grid(16), stream=stream0) buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v_3], Original ATen: [aten.div] triton_poi_fused_div_6.run(buf76, buf77, 16, grid=grid(16), stream=stream0) buf78 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp_3], Original ATen: [aten.cat] triton_poi_fused_cat_7.run(buf72, buf77, buf78, 32, grid=grid(32), stream=stream0) buf79 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: 
[matmul_33], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf72, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf79) buf80 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf72, (4, 4, 1), (4, 1, 1), 0), buf79, out=buf80) buf81 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf77, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf81) buf82 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf77, (4, 4, 1), (4, 1, 1), 0), buf81, out=buf82) buf83 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm] extern_kernels.bmm(buf78, reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf83) buf84 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_14], Original ATen: [aten.cat] triton_poi_fused_cat_8.run(buf74, buf84, 16, grid=grid(16), stream=stream0) del buf74 buf85 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf84, (4, 2, 2), (4, 2, 1), 0), buf83, out=buf85) buf86 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf78, (4, 4, 2), (8, 1, 4), 0), buf85, out=buf86) buf87 = reinterpret_tensor(buf80, (4, 4), (4, 1), 0); del buf80 # reuse buf88 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [u_9, x_emb_3, u_10, hidden_new_3, add_26, c_3, mul_18, sub_15, mul_19, new_h_3], Original ATen: [aten.add, aten.sigmoid, aten.relu, aten.mul, aten.rsub] triton_poi_fused_add_mul_relu_rsub_sigmoid_15.run(buf87, buf66, buf82, buf86, buf70, primals_7, buf69, primals_5, buf88, 16, grid=grid(16), stream=stream0) del buf82 del buf86 buf89 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] triton_poi_fused_stack_16.run(buf22, buf44, buf66, buf88, buf89, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf89, (4, 4, 4), (16, 4, 1), 0), buf88, primals_3, primals_5, primals_7, buf0, buf2, buf3, buf4, buf6, reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf10, buf11, buf21, buf22, buf24, buf25, buf26, buf28, reinterpret_tensor(buf31, (4, 1), (1, 1), 0), buf32, buf33, buf34, buf43, buf44, buf46, buf47, buf48, buf50, reinterpret_tensor(buf53, (4, 1), (1, 1), 0), buf54, buf55, buf56, buf65, buf66, buf68, buf69, buf70, buf72, reinterpret_tensor(buf75, (4, 1), (1, 1), 0), buf76, buf77, buf78, buf87, reinterpret_tensor(buf85, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf84, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf83, (4, 1, 2), (2, 1, 1), 0), buf81, buf79, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(buf67, (8, 4), (1, 8), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), reinterpret_tensor(buf63, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf62, (4, 2, 2), (4, 1, 2), 0), 
reinterpret_tensor(buf61, (4, 1, 2), (2, 1, 1), 0), buf59, buf57, reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf45, (8, 4), (1, 8), 0), reinterpret_tensor(buf41, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf40, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf39, (4, 1, 2), (2, 1, 1), 0), buf37, buf35, reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf23, (8, 4), (1, 8), 0), buf16, reinterpret_tensor(buf19, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf18, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf17, (4, 1, 2), (2, 1, 1), 0), buf14, buf12, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), reinterpret_tensor(buf1, (8, 4), (1, 8), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
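# Smoke-test sketch (added for illustration; not part of the generated file).
# It feeds call() weights with the shapes asserted at the top of call() and
# checks the two user-visible outputs: outs[0] is the (4, 4, 4) stack of
# hidden states and outs[1] the (4, 4) final hidden state. The primals_*
# roles are inferred from the "Source Nodes" comments above (primals_2/3 -> r,
# primals_4/5 -> u, primals_6/7 -> x_emb) and should be treated as an
# assumption; the helper name smoke_test_call is ours.
def smoke_test_call():
    dev = 'cuda:0'
    primals = [
        torch.rand(4, 4, 4, device=dev),  # input sequence
        torch.rand(8, 4, device=dev),     # r_kernel-shaped weight
        torch.rand(4, device=dev),        # r_bias-shaped weight
        torch.rand(8, 4, device=dev),     # u_kernel-shaped weight
        torch.rand(4, device=dev),        # u_bias-shaped weight
        torch.rand(4, 4, device=dev),     # inp_emb_kernel-shaped weight
        torch.rand(4, device=dev),        # inp_emb_bias-shaped weight
    ]
    outs = call(primals)  # note: call() clears the argument list it is given
    stacked, last_hidden = outs[0], outs[1]
    assert stacked.shape == (4, 4, 4) and last_hidden.shape == (4, 4)
    return stacked, last_hidden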
import torch
import torch.nn.functional as F
import torch.nn as nn


def rotation_components(x, y, eps=1e-12):
    """Return u, v, their stacked 2 x hidden basis, and the 2x2 rotation Rth.

    u is the unit direction of x; v spans the remaining in-plane component of
    y, so Rth rotates within span{u, v} by the angle between x and y.
    """
    size_batch = x.size()[0]
    hidden_size = x.size()[1]
    u = F.normalize(x, p=2, dim=1, eps=eps)
    costh = torch.sum(u * F.normalize(y, p=2, dim=1, eps=eps), dim=1).view(
        size_batch, 1)
    sinth = torch.sqrt(1 - costh ** 2).view(size_batch, 1)
    Rth = torch.cat((costh, -sinth, sinth, costh), dim=1).view(size_batch,
        2, 2)
    v = F.normalize(y - torch.sum(u * y, 1).view(size_batch, 1) * u, p=2,
        dim=1, eps=eps)
    tmp = torch.cat((u.view(size_batch, 1, hidden_size), v.view(size_batch,
        1, hidden_size)), dim=1)
    return u.view(size_batch, hidden_size, 1), v.view(size_batch,
        hidden_size, 1), tmp, Rth


def rotate(v1, v2, v):
    """Apply the rotation carrying v1 toward v2 to the vector v."""
    size_batch = v1.size()[0]
    hidden_size = v1.size()[1]
    U = rotation_components(v1, v2)
    h = v.view(size_batch, hidden_size, 1)
    # Remove the components of h inside span{u, v}, then add them back
    # rotated by Rth; the orthogonal complement passes through unchanged.
    return v + (-torch.matmul(U[0], torch.matmul(U[0].transpose(1, 2), h)) -
        torch.matmul(U[1], torch.matmul(U[1].transpose(1, 2), h)) +
        torch.matmul(U[2].transpose(1, 2), torch.matmul(U[3],
        torch.matmul(U[2], h)))).view(size_batch, hidden_size)


def rotation_operator(x, y, eps=1e-12):
    """Return the batched hidden x hidden rotation matrix taking x toward y."""
    hidden_size = x.size()[1]
    tmp_u, tmp_v, tmp, Rth = rotation_components(x, y, eps=eps)
    return torch.eye(hidden_size, device=x.device) - torch.matmul(tmp_u,
        torch.transpose(tmp_u, dim0=1, dim1=2)) - torch.matmul(tmp_v,
        torch.transpose(tmp_v, dim0=1, dim1=2)) + torch.matmul(torch.matmul(
        torch.transpose(tmp, dim0=1, dim1=2), Rth), tmp)


class RUMCell(nn.Module):

    def __init__(self, input_size, hidden_size, eta_, lambda_, bias, eps,
            activation):
        super(RUMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.eta_ = eta_
        self.lambda_ = lambda_
        self.bias = bias
        self.eps = eps
        if activation is None:
            activation = 'relu'
        if activation == 'tanh':
            self.activation = F.tanh
        elif activation == 'relu':
            self.activation = F.relu
        self.r_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size,
            hidden_size))
        nn.init.orthogonal_(self.r_kernel)
        self.r_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.r_bias, 1.0)
        self.u_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size,
            hidden_size))
        nn.init.orthogonal_(self.u_kernel)
        self.u_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.u_bias, 1.0)
        self.inp_emb_kernel = nn.Parameter(torch.Tensor(input_size,
            hidden_size))
        nn.init.xavier_uniform_(self.inp_emb_kernel)
        self.inp_emb_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.inp_emb_bias, 0.0)

    def forward(self, inputs, hidden, assoc_mem):
        if hidden is None:
            hidden = torch.zeros(inputs.size(0), self.hidden_size,
                device=inputs.device)
        if assoc_mem is None and self.lambda_ == 1:
            assoc_mem = torch.eye(self.hidden_size, device=inputs.device
                ).unsqueeze(0)
        r = torch.matmul(torch.cat((inputs, hidden), 1), self.r_kernel
            ) + self.r_bias
        u = torch.matmul(torch.cat((inputs, hidden), 1), self.u_kernel
            ) + self.u_bias
        x_emb = torch.matmul(inputs, self.inp_emb_kernel) + self.inp_emb_bias
        u = u.sigmoid()
        if self.lambda_ == 0:
            # lambda_ == 0: rotate the hidden state directly.
            hidden_new = rotate(x_emb, r, hidden)
        elif self.lambda_ == 1:
            # lambda_ == 1: accumulate rotations in an associative memory.
            tmp_rotation = rotation_operator(x_emb, r)
            assoc_mem = torch.matmul(assoc_mem, tmp_rotation)
            hidden_new = torch.matmul(assoc_mem, hidden.unsqueeze(-1)
                ).squeeze(-1)
        else:
            raise ValueError(f'unsupported lambda_: {self.lambda_}')
        c = F.relu(hidden_new + x_emb)
        new_h = u * hidden + (1 - u) * c
        if self.eta_:
            new_h = F.normalize(new_h, p=2, dim=1, eps=self.eps) * self.eta_
        return new_h, assoc_mem


class RUM(nn.Module):

    def __init__(self, input_size, hidden_size, eta_=None, lambda_=0,
            bias=True, eps=1e-12, activation=None):
        super().__init__()
        self.rum_cell = RUMCell(input_size, hidden_size, eta_, lambda_,
            bias, eps, activation)

    def forward(self, input_, hidden=None, assoc_mem=None):
        outputs = []
        for x in torch.unbind(input_, dim=1):
            hidden, assoc_mem = self.rum_cell(x, hidden, assoc_mem)
            outputs.append(hidden)
        return torch.stack(outputs, dim=1), hidden


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
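# Illustrative sketch (an addition, not part of the captured repo code): per
# batch element, rotation_operator builds the unique plane rotation that
# carries the direction of x onto the direction of y and leaves the
# orthogonal complement fixed. The check below assumes the definitions above.
def _check_rotation_operator():
    torch.manual_seed(0)
    x = torch.randn(4, 8)
    y = torch.randn(4, 8)
    R = rotation_operator(x, y)
    # Orthogonality: R acts as a 2D rotation inside span{u, v} and as the
    # identity elsewhere, so R @ R^T should be the identity.
    eye = torch.eye(8).expand(4, 8, 8)
    assert torch.allclose(torch.matmul(R, R.transpose(1, 2)), eye, atol=1e-4)
    # R maps the unit vector of x onto the unit vector of y.
    x_hat = F.normalize(x, p=2, dim=1)
    y_hat = F.normalize(y, p=2, dim=1)
    rotated = torch.matmul(R, x_hat.unsqueeze(-1)).squeeze(-1)
    assert torch.allclose(rotated, y_hat, atol=1e-4)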
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp43 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp54 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp26 + tmp2 tmp28 = tmp27 * tmp27 tmp30 = tmp29 + tmp7 tmp31 = tmp30 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp33 + tmp13 tmp35 = tmp34 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp37 + tmp19 tmp39 = tmp38 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = 
libdevice.sqrt(tmp40) tmp42 = triton_helpers.maximum(tmp41, tmp24) tmp44 = tmp43 + tmp2 tmp45 = tmp44 * tmp44 tmp47 = tmp46 + tmp7 tmp48 = tmp47 * tmp47 tmp49 = tmp45 + tmp48 tmp51 = tmp50 + tmp13 tmp52 = tmp51 * tmp51 tmp53 = tmp49 + tmp52 tmp55 = tmp54 + tmp19 tmp56 = tmp55 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp42, xmask) tl.store(out_ptr2 + x0, tmp59, xmask) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4(in_out_ptr0 , in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp3 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp8 / tmp25 tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp14 / tmp25 tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp20 / tmp25 tmp39 = tmp37 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = tmp26 * tmp3 tmp42 = tmp29 * tmp8 tmp43 = tmp41 + tmp42 tmp44 = tmp33 * tmp14 tmp45 = tmp43 + tmp44 tmp46 = tmp37 * tmp20 tmp47 = tmp45 + tmp46 tl.store(in_out_ptr0 + x0, tmp40, xmask) tl.store(out_ptr0 + x0, tmp47, xmask) @triton.jit def triton_poi_fused_add_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 - tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) 
@triton.jit def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_cat_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp10 * tmp10 tmp12 = 1.0 tmp13 = tmp12 - tmp11 tmp14 = libdevice.sqrt(tmp13) tmp15 = -tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 3, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr0 + x1, tmp21 & xmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tmp22 * tmp22 tmp24 = tmp12 - tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tl.full([1], 4, tl.int64) tmp31 = tl.load(in_ptr0 + x1, tmp28 & xmask, eviction_policy= 'evict_last', other=0.0) tmp32 = tl.where(tmp21, tmp27, tmp31) tmp33 = tl.where(tmp9, tmp17, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x2, tmp34, xmask) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr1 + x2, xmask) tmp8 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask) 
tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x2, xmask) tmp25 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = -tmp0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp6 + tmp5 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp6 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp8 = tl.load(in_ptr3 + x2, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x2, xmask) tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr7 + x2, xmask) tmp25 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (12 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp8 = tl.load(in_ptr3 + x2, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x2, xmask) tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_stack_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) 
triton_poi_fused_cat_1[grid(32)](primals_1, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_2, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_6, out=buf4) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_6, out=buf26) buf48 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_6, out=buf48) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf27 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf49 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_clamp_min_linalg_vector_norm_2[grid(4)](buf4, primals_7, buf26, buf48, buf5, buf27, buf49, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_3[grid(16)](buf4, primals_7, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 buf8 = reinterpret_tensor(buf7, (4,), (1,), 0) del buf7 buf9 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf8, buf2, primals_3, buf6, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf2, primals_3, buf9, buf6, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 1), 0 ), buf12, out=buf13) buf14 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0), buf14, out=buf15) buf16 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf6, buf11, buf16, 32, XBLOCK=32, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf16, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf8, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (4, 2, 2), (4, 2, 1), 0), buf17, out=buf19) buf20 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (4, 4, 2), (8, 1, 4), 0), buf19, out=buf20) buf21 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0) del buf13 buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9[grid(16)](buf21, buf15, 
buf20, buf4, primals_7, buf3, primals_5, buf26, buf27, buf22, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_10[grid(32)](primals_1, buf22, buf23, 32, XBLOCK=32, num_warps=1, num_stages=1) buf24 = reinterpret_tensor(buf20, (4, 4), (4, 1), 0) del buf20 extern_kernels.mm(buf23, primals_2, out=buf24) buf25 = reinterpret_tensor(buf15, (4, 4), (4, 1), 0) del buf15 extern_kernels.mm(buf23, primals_4, out=buf25) buf29 = buf27 del buf27 buf30 = reinterpret_tensor(buf29, (4,), (1,), 0) del buf29 buf31 = buf8 del buf8 triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf30, buf24, primals_3, buf28, buf31, 4, XBLOCK=4, num_warps =1, num_stages=1) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf24, primals_3, buf31, buf28, buf32, 16, XBLOCK=16, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf28, buf33, buf34, 32, XBLOCK=32, num_warps=1, num_stages=1) buf35 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf28, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf35) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf28, (4, 4, 1), (4, 1, 1), 0), buf35, out=buf36) buf37 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf33, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf37) buf38 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 1), 0), buf37, out=buf38) buf39 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf34, reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf39) buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf30, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf40, (4, 2, 2), (4, 2, 1), 0), buf39, out=buf41) buf42 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf34, (4, 4, 2), (8, 1, 4), 0), buf41, out=buf42) buf43 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0) del buf36 buf44 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf50 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11[grid(16)](buf43, buf22, buf38, buf42, buf26, primals_7, buf25, primals_5, buf48, buf49, buf44, buf50, 16, XBLOCK=16, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_12[grid(32)](primals_1, buf44, buf45, 32, XBLOCK=32, num_warps=1, num_stages=1) buf46 = reinterpret_tensor(buf42, (4, 4), (4, 1), 0) del buf42 extern_kernels.mm(buf45, primals_2, out=buf46) buf47 = reinterpret_tensor(buf38, (4, 4), (4, 1), 0) del buf38 extern_kernels.mm(buf45, primals_4, out=buf47) buf51 = buf49 del buf49 buf52 = reinterpret_tensor(buf51, (4,), (1,), 0) del buf51 buf53 = buf30 del buf30 triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf52, buf46, primals_3, buf50, buf53, 4, XBLOCK=4, num_warps =1, 
num_stages=1) buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf46, primals_3, buf53, buf50, buf54, 16, XBLOCK=16, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf54, buf55, 16, XBLOCK=16, num_warps=1, num_stages=1) buf56 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf50, buf55, buf56, 32, XBLOCK=32, num_warps=1, num_stages=1) buf57 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf50, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf57) buf58 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf50, (4, 4, 1), (4, 1, 1), 0), buf57, out=buf58) buf59 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf55, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf59) buf60 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf55, (4, 4, 1), (4, 1, 1), 0), buf59, out=buf60) buf61 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf56, reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf61) buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf52, buf62, 16, XBLOCK=16, num_warps=1, num_stages=1) buf63 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf62, (4, 2, 2), (4, 2, 1), 0), buf61, out=buf63) buf64 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf56, (4, 4, 2), (8, 1, 4), 0), buf63, out=buf64) buf70 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12 ), primals_6, out=buf70) del primals_6 buf71 = reinterpret_tensor(buf52, (4, 1), (1, 4), 0) del buf52 triton_poi_fused_add_clamp_min_linalg_vector_norm_13[grid(4)](buf70, primals_7, buf71, 4, XBLOCK=4, num_warps=1, num_stages=1) buf65 = reinterpret_tensor(buf58, (4, 4), (4, 1), 0) del buf58 buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf72 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11[grid(16)](buf65, buf44, buf60, buf64, buf48, primals_7, buf47, primals_5, buf70, buf71, buf66, buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf67 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_14[grid(32)](primals_1, buf66, buf67, 32, XBLOCK=32, num_warps=1, num_stages=1) buf68 = reinterpret_tensor(buf64, (4, 4), (4, 1), 0) del buf64 extern_kernels.mm(buf67, primals_2, out=buf68) buf69 = reinterpret_tensor(buf60, (4, 4), (4, 1), 0) del buf60 extern_kernels.mm(buf67, primals_4, out=buf69) buf73 = buf71 del buf71 buf74 = reinterpret_tensor(buf73, (4,), (1,), 0) del buf73 buf75 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf74, buf68, primals_3, buf72, buf75, 4, XBLOCK=4, num_warps =1, num_stages=1) buf76 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf68, primals_3, buf75, buf72, buf76, 16, XBLOCK=16, num_warps=1, num_stages=1) buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf76, buf77, 16, XBLOCK=16, num_warps=1, num_stages=1) buf78 = 
empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf72, buf77, buf78, 32, XBLOCK=32, num_warps=1, num_stages=1) buf79 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf72, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf79) buf80 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf72, (4, 4, 1), (4, 1, 1), 0), buf79, out=buf80) buf81 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf77, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf81) buf82 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf77, (4, 4, 1), (4, 1, 1), 0), buf81, out=buf82) buf83 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf78, reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf83) buf84 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf74, buf84, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf74 buf85 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf84, (4, 2, 2), (4, 2, 1), 0), buf83, out=buf85) buf86 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf78, (4, 4, 2), (8, 1, 4), 0), buf85, out=buf86) buf87 = reinterpret_tensor(buf80, (4, 4), (4, 1), 0) del buf80 buf88 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_relu_rsub_sigmoid_15[grid(16)](buf87, buf66, buf82, buf86, buf70, primals_7, buf69, primals_5, buf88, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf82 del buf86 buf89 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_stack_16[grid(64)](buf22, buf44, buf66, buf88, buf89, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf89, (4, 4, 4), (16, 4, 1), 0), buf88, primals_3, primals_5, primals_7, buf0, buf2, buf3, buf4, buf6, reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf10, buf11, buf21, buf22, buf24, buf25, buf26, buf28, reinterpret_tensor(buf31, (4, 1), (1, 1), 0), buf32, buf33, buf34, buf43, buf44, buf46, buf47, buf48, buf50, reinterpret_tensor(buf53, (4, 1), (1, 1), 0), buf54, buf55, buf56, buf65, buf66, buf68, buf69, buf70, buf72, reinterpret_tensor (buf75, (4, 1), (1, 1), 0), buf76, buf77, buf78, buf87, reinterpret_tensor(buf85, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf84, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf83, (4, 1, 2), (2, 1, 1), 0), buf81, buf79, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(buf67, (8, 4), (1, 8), 0), reinterpret_tensor( primals_4, (4, 8), (1, 4), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), reinterpret_tensor(buf63, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf62, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf61, (4, 1, 2), (2, 1, 1), 0), buf59, buf57, reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf45, (8, 4), (1, 8), 0), reinterpret_tensor( buf41, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf40, (4, 2, 2 ), (4, 1, 2), 0), reinterpret_tensor(buf39, (4, 1, 2), (2, 1, 1), 0 ), buf37, buf35, reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf23, (8, 4), (1, 8), 0), buf16, reinterpret_tensor(buf19, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf18, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf17, (4, 1, 2), (2, 1, 1), 0), buf14, 
        buf12, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0),
        reinterpret_tensor(buf1, (8, 4), (1, 8), 0))


def rotation_components(x, y, eps=1e-12):
    size_batch = x.size()[0]
    hidden_size = x.size()[1]
    u = F.normalize(x, p=2, dim=1, eps=eps)
    costh = torch.sum(u * F.normalize(y, p=2, dim=1, eps=eps), dim=1).view(
        size_batch, 1)
    sinth = torch.sqrt(1 - costh ** 2).view(size_batch, 1)
    Rth = torch.cat((costh, -sinth, sinth, costh), dim=1).view(size_batch,
        2, 2)
    v = F.normalize(y - torch.sum(u * y, 1).view(size_batch, 1) * u, p=2,
        dim=1, eps=eps)
    tmp = torch.cat((u.view(size_batch, 1, hidden_size), v.view(size_batch,
        1, hidden_size)), dim=1)
    return u.view(size_batch, hidden_size, 1), v.view(size_batch,
        hidden_size, 1), tmp, Rth


def rotate(v1, v2, v):
    size_batch = v1.size()[0]
    hidden_size = v1.size()[1]
    U = rotation_components(v1, v2)
    h = v.view(size_batch, hidden_size, 1)
    return v + (-torch.matmul(U[0], torch.matmul(U[0].transpose(1, 2), h)) -
        torch.matmul(U[1], torch.matmul(U[1].transpose(1, 2), h)) +
        torch.matmul(U[2].transpose(1, 2), torch.matmul(U[3],
        torch.matmul(U[2], h)))).view(size_batch, hidden_size)


def rotation_operator(x, y, eps=1e-12):
    hidden_size = x.size()[1]
    tmp_u, tmp_v, tmp, Rth = rotation_components(x, y, eps=eps)
    return torch.eye(hidden_size, device=x.device) - torch.matmul(tmp_u,
        torch.transpose(tmp_u, dim0=1, dim1=2)) - torch.matmul(tmp_v,
        torch.transpose(tmp_v, dim0=1, dim1=2)) + torch.matmul(torch.matmul(
        torch.transpose(tmp, dim0=1, dim1=2), Rth), tmp)


class RUMCell(nn.Module):

    def __init__(self, input_size, hidden_size, eta_, lambda_, bias, eps,
            activation):
        super(RUMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.eta_ = eta_
        self.lambda_ = lambda_
        self.bias = bias
        self.eps = eps
        if activation is None:
            activation = 'relu'
        if activation == 'tanh':
            self.activation = F.tanh
        elif activation == 'relu':
            self.activation = F.relu
        self.r_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size,
            hidden_size))
        nn.init.orthogonal_(self.r_kernel)
        self.r_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.r_bias, 1.0)
        self.u_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size,
            hidden_size))
        nn.init.orthogonal_(self.u_kernel)
        self.u_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.u_bias, 1.0)
        self.inp_emb_kernel = nn.Parameter(torch.Tensor(input_size,
            hidden_size))
        nn.init.xavier_uniform_(self.inp_emb_kernel)
        self.inp_emb_bias = nn.Parameter(torch.Tensor(hidden_size))
        nn.init.constant_(self.inp_emb_bias, 0.0)

    def forward(self, inputs, hidden, assoc_mem):
        if hidden is None:
            hidden = torch.zeros(inputs.size(0), self.hidden_size,
                device=inputs.device)
        if assoc_mem is None and self.lambda_ == 1:
            assoc_mem = torch.eye(self.hidden_size, device=inputs.device
                ).unsqueeze(0)
        r = torch.matmul(torch.cat((inputs, hidden), 1), self.r_kernel
            ) + self.r_bias
        u = torch.matmul(torch.cat((inputs, hidden), 1), self.u_kernel
            ) + self.u_bias
        x_emb = torch.matmul(inputs, self.inp_emb_kernel) + self.inp_emb_bias
        u = u.sigmoid()
        if self.lambda_ == 0:
            hidden_new = rotate(x_emb, r, hidden)
        elif self.lambda_ == 1:
            tmp_rotation = rotation_operator(x_emb, r)
            assoc_mem = torch.matmul(assoc_mem, tmp_rotation)
            hidden_new = torch.matmul(assoc_mem, hidden.unsqueeze(-1)
                ).squeeze(-1)
        else:
            raise ValueError(f'unsupported lambda_: {self.lambda_}')
        c = F.relu(hidden_new + x_emb)
        new_h = u * hidden + (1 - u) * c
        if self.eta_:
            new_h = F.normalize(new_h, p=2, dim=1, eps=self.eps) * self.eta_
        return new_h, assoc_mem


class RUMNew(nn.Module):

    def __init__(self, input_size, hidden_size, eta_=None, lambda_=0,
            bias=True, eps=1e-12, activation=None):
        super().__init__()
        self.rum_cell = RUMCell(input_size, hidden_size, eta_, lambda_,
            bias, eps, activation)

    def forward(self, input_0):
        primals_2 = self.rum_cell.r_kernel
        primals_3 = self.rum_cell.r_bias
        primals_4 = self.rum_cell.u_kernel
        primals_5 = self.rum_cell.u_bias
        primals_6 = self.rum_cell.inp_emb_kernel
        primals_7 = self.rum_cell.inp_emb_bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
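# Illustrative parity sketch (an addition, not part of the generated output):
# the compiled call() above fully unrolls the four timesteps of the
# (4, 4, 4) input, so RUMNew should match the eager RUM from the previous
# cell when both share one state_dict. Assumes a CUDA device and that both
# classes are importable side by side.
def _check_rum_parity():
    torch.manual_seed(0)
    eager = RUM(input_size=4, hidden_size=4).cuda()
    compiled = RUMNew(input_size=4, hidden_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    out_eager, h_eager = eager(x)
    out_compiled, h_compiled = compiled(x)
    assert torch.allclose(out_eager, out_compiled, atol=1e-5)
    assert torch.allclose(h_eager, h_compiled, atol=1e-5)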
omri123/rotational-unit-of-memory
RUM
false
16,262
[ "MIT" ]
82
e796c841e1e837df09497ba77c3bc285db47d02d
https://github.com/omri123/rotational-unit-of-memory/tree/e796c841e1e837df09497ba77c3bc285db47d02d
ContrastiveDistanceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ef/cefub2ep7wmbkbzcfxqedyynyxvtong2vgbds355uwjq6bfsdi7u.py # Topologically Sorted Source Nodes: [sub_1, pow_1, mul, margin_distance, margin_distance_, pow_2, mul_1, loss, sum_1, truediv, loss_1], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.sum, aten.div] # Source node to ATen node mapping: # loss => add # loss_1 => div_1 # margin_distance => sub # margin_distance_ => clamp_min # mul => mul # mul_1 => mul_1 # pow_1 => pow_1 # pow_2 => pow_2 # sub_1 => sub_1 # sum_1 => sum_1 # truediv => div # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 4), kwargs = {}) triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = tmp1 - tmp3 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp0 * tmp9 tmp11 = tmp5 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub_1, pow_1, mul, margin_distance, margin_distance_, pow_2, mul_1, loss, sum_1, truediv, loss_1], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * class ContrastiveDistanceLoss(nn.Module): """ Contrastive distance loss """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveDistanceLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, distance_pred, distance_true): """ Forward propagation method for the contrastive loss. Args: distance_pred: predicted distances distance_true: true distances Returns: loss """ bs = len(distance_true) margin_distance = self.margin - distance_pred margin_distance_ = torch.clamp(margin_distance, min=0.0) loss = (1 - distance_true) * torch.pow(distance_pred, 2 ) + distance_true * torch.pow(margin_distance_, 2) if self.reduction == 'mean': loss = torch.sum(loss) / 2.0 / bs elif self.reduction == 'sum': loss = torch.sum(loss) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
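# Illustrative check (an addition, not part of the captured repo code): with
# reduction='mean' the forward above computes
# sum((1 - y) * d**2 + y * clamp(margin - d, min=0)**2) / 2 / batch_size,
# where y = 1 marks a dissimilar pair that should be pushed past the margin.
def _check_contrastive_distance_loss():
    torch.manual_seed(0)
    d = torch.rand(4, 4, 4, 4)
    y = torch.randint(0, 2, (4, 4, 4, 4)).float()
    loss = ContrastiveDistanceLoss(margin=1.0, reduction='mean')(d, y)
    expected = ((1 - y) * d.pow(2) + y * (1.0 - d).clamp(min=0).pow(2)
        ).sum() / 2.0 / len(y)
    assert torch.allclose(loss, expected)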
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = tmp1 - tmp3 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp0 * tmp9 tmp11 = tmp5 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class ContrastiveDistanceLossNew(nn.Module): """ Contrastive distance loss """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveDistanceLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
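# Illustrative check (an addition, not part of the generated output): the
# fused kernel bakes the reduction into two multiplies, tmp15 = 0.5 for the
# "/ 2.0" and tmp17 = 0.25 for the "/ bs" with bs = 4 fixed by the
# (4, 4, 4, 4) size guards, so this compiled variant only accepts that exact
# shape. Assumes a CUDA device.
def _check_fused_contrastive_distance_loss():
    torch.manual_seed(0)
    x0 = torch.rand(4, 4, 4, 4, device='cuda')
    x1 = torch.rand(4, 4, 4, 4, device='cuda')
    fused = ContrastiveDistanceLossNew()(x0, x1)
    # Closed form read off the kernel: (1 - x0) * x1**2 + x0 * relu(1 - x1)**2.
    expected = ((1 - x0) * x1.pow(2) + x0 * (1.0 - x1).clamp(min=0).pow(2)
        ).sum() * 0.5 * 0.25
    assert torch.allclose(fused, expected, atol=1e-5)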
pokidyshev/catalyst
ContrastiveDistanceLoss
false
16,263
[ "Apache-2.0" ]
46
bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
https://github.com/pokidyshev/catalyst/tree/bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
ContrastivePairwiseEmbeddingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/le/clewmq2oyakpojeemfsrrjq5tneb2unj5om75r32lnu3wfwo4lbd.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + 
(4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xn/cxn33y6tp7levuwn6ff3wut6bbq2cwfo4g4ag3oohicpjxvbrazc.py # Topologically Sorted Source Nodes: [batch_idx, loss], Original ATen: [aten.arange, aten.nll_loss_forward] # Source node to ATen node mapping: # batch_idx => iota # loss => convert_element_type, div, full_default_1, ne_1, ne_2, neg, sum_2, sum_3, where_1 # Graph fragment: # %iota : [num_users=4] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {}) # %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%iota, -100), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {}) triton_per_fused_arange_nll_loss_forward_1 = async_compile.triton('triton_per_fused_arange_nll_loss_forward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_arange_nll_loss_forward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_arange_nll_loss_forward_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp6 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp0 = r0 tmp1 = tl.full([1, 1], -100, tl.int64) tmp2 = tmp0 != tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp0, tmp3) tmp5 = tl.load(in_ptr0 + (tmp4 + (4*r0)), None, eviction_policy='evict_last') tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp5 - tmp17 tmp19 = -tmp18 tmp20 = 0.0 tmp21 = tl.where(tmp2, tmp19, tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = tmp2.to(tl.int64) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tmp28.to(tl.float32) tmp30 = tmp24 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pairwise_similarity], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (1, 4, 4), (0, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [batch_idx, loss], Original ATen: [aten.arange, aten.nll_loss_forward] triton_per_fused_arange_nll_loss_forward_1.run(buf4, buf1, 1, 4, grid=grid(1), stream=stream0) del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn.modules.loss import * import torch.nn as nn import torch.nn.functional as F from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * class ContrastivePairwiseEmbeddingLoss(nn.Module): """ ContrastivePairwiseEmbeddingLoss – proof of concept criterion. Still work in progress. """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastivePairwiseEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, embeddings_pred, embeddings_true): """ Work in progress. Args: embeddings_pred: predicted embeddings embeddings_true: true embeddings Returns: loss """ device = embeddings_pred.device pairwise_similarity = torch.einsum('se,ae->sa', embeddings_pred, embeddings_true) bs = embeddings_pred.shape[0] batch_idx = torch.arange(bs, device=device) loss = F.cross_entropy(pairwise_similarity, batch_idx, reduction= self.reduction) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
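A minimal eager-mode gloss (added for clarity, not part of the source record): the criterion above is cross-entropy over a pairwise similarity matrix, with the diagonal (each sample matched to itself) as targets, and the einsum is a plain matmul against the transposed targets.

import torch
import torch.nn.functional as F

# Standalone sketch; sizes follow get_inputs() above.
torch.manual_seed(0)
pred = torch.rand(4, 4)                      # embeddings_pred: (batch, emb_dim)
true = torch.rand(4, 4)                      # embeddings_true: (batch, emb_dim)
sim = torch.einsum('se,ae->sa', pred, true)  # identical to pred @ true.t()
assert torch.allclose(sim, pred @ true.t())
loss = F.cross_entropy(sim, torch.arange(4), reduction='mean')
print(loss.item())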
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused_arange_nll_loss_forward_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp0 = r0 tmp1 = tl.full([1, 1], -100, tl.int64) tmp2 = tmp0 != tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp0, tmp3) tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp5 - tmp17 tmp19 = -tmp18 tmp20 = 0.0 tmp21 = tl.where(tmp2, tmp19, tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = tmp2.to(tl.int64) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tmp28.to(tl.float32) tmp30 = tmp24 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (1, 4, 4), (0, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = 
empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_per_fused_arange_nll_loss_forward_1[grid(1)](buf4, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf4, class ContrastivePairwiseEmbeddingLossNew(nn.Module): """ ContrastivePairwiseEmbeddingLoss – proof of concept criterion. Still work in progress. """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastivePairwiseEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
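A hypothetical smoke test (mine, not in the record) comparing the eager criterion against the Inductor wrapper above; call() pins cuda:0, hence the availability gate, and both classes from the preceding fields are assumed to be in scope.

import torch

# Assumes ContrastivePairwiseEmbeddingLoss and ContrastivePairwiseEmbeddingLossNew
# are defined as in the fields above.
if torch.cuda.is_available():
    pred = torch.rand(4, 4, device='cuda')
    true = torch.rand(4, 4, device='cuda')
    eager = ContrastivePairwiseEmbeddingLoss()(pred, true)
    fused = ContrastivePairwiseEmbeddingLossNew()(pred, true)
    assert torch.allclose(eager, fused, atol=1e-6)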
pokidyshev/catalyst
ContrastivePairwiseEmbeddingLoss
false
16264
[ "Apache-2.0" ]
46
bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
https://github.com/pokidyshev/catalyst/tree/bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
GCNModelVAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3w/c3w6ed26vbtzgzigkkyuiuwphm777x7aoogcxkvoly3ia7qifnzg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # output_1 => relu # Graph fragment: # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf2, 16, grid=grid(16), stream=stream0) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [support_2], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_5, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf5, out=buf6) del buf5 return (buf4, buf6, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter import torch.nn as nn import torch.nn.modules.loss class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, dropout=0.0, act=F.relu): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight) def forward(self, input, adj): input = F.dropout(input, self.dropout, self.training) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) output = self.act(output) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class InnerProductDecoder(nn.Module): """Decoder for using inner product for prediction.""" def __init__(self, dropout, act=torch.sigmoid): super(InnerProductDecoder, self).__init__() self.dropout = dropout self.act = act def forward(self, z): z = F.dropout(z, self.dropout, training=self.training) adj = self.act(torch.mm(z, z.t())) return adj class GCNModelVAE(nn.Module): def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout): super(GCNModelVAE, self).__init__() self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu) self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.dc = InnerProductDecoder(dropout, act=lambda x: x) def encode(self, x, adj): hidden1 = self.gc1(x, adj) return self.gc2(hidden1, adj), self.gc3(hidden1, adj) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def forward(self, x, adj): mu, logvar = self.encode(x, adj) z = self.reparameterize(mu, logvar) return z, mu, logvar def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_feat_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4, 'dropout': 0.5}]
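A minimal eager usage sketch (not from the repo), matching get_inputs() and get_init_inputs() above and assuming dense inputs (torch.spmm accepts the dense adjacency used during tracing). In eval mode reparameterize() returns mu unchanged.

import torch

model = GCNModelVAE(input_feat_dim=4, hidden_dim1=4, hidden_dim2=4, dropout=0.5)
model.eval()                          # disables dropout and the sampling path
x, adj = torch.rand(4, 4), torch.rand(4, 4)
z, mu, logvar = model(x, adj)
assert torch.equal(z, mu)             # outside training, z is just mu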
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter import torch.nn as nn import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 extern_kernels.mm(buf2, primals_5, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf5, out=buf6) del buf5 return buf4, buf6, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, dropout=0.0, act=F.relu): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight) def forward(self, input, adj): input = F.dropout(input, self.dropout, self.training) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) output = self.act(output) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class InnerProductDecoder(nn.Module): """Decoder for using inner product for prediction.""" def __init__(self, dropout, act=torch.sigmoid): super(InnerProductDecoder, self).__init__() self.dropout = dropout self.act = act def forward(self, z): z = F.dropout(z, self.dropout, training=self.training) adj = self.act(torch.mm(z, z.t())) return adj class GCNModelVAENew(nn.Module): def __init__(self, 
input_feat_dim, hidden_dim1, hidden_dim2, dropout):
        super(GCNModelVAENew, self).__init__()
        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout,
            act=F.relu)
        self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=
            lambda x: x)
        self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=
            lambda x: x)
        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

    def encode(self, x, adj):
        hidden1 = self.gc1(x, adj)
        return self.gc2(hidden1, adj), self.gc3(hidden1, adj)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def forward(self, input_0, input_1):
        # Mapping fixed to match call(): the first mm there computes the gc1
        # support as mm(primals_1, primals_2), and primals_3 is the left
        # operand (adjacency) of every subsequent mm, so the inputs map to
        # primals_1/primals_3 and the layer weights to primals_2/4/5.
        primals_1 = input_0
        primals_2 = self.gc1.weight
        primals_3 = input_1
        primals_4 = self.gc2.weight
        primals_5 = self.gc3.weight
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        # call() returns (mu, logvar, hidden1, ...saved tensors); the graph
        # was traced without the sampling path, so z == mu here.
        return output[0], output[0], output[1]
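A hypothetical parity check (mine, not part of the record): with the corrected primals mapping above, the compiled wrapper should agree with the eager model in eval mode; call() hard-codes cuda:0, hence the gate.

import torch

if torch.cuda.is_available():
    eager = GCNModelVAE(4, 4, 4, 0.5).cuda().eval()
    fused = GCNModelVAENew(4, 4, 4, 0.5).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # gc1/gc2/gc3 weights shared
    x = torch.rand(4, 4, device='cuda')
    adj = torch.rand(4, 4, device='cuda')
    _, mu_a, lv_a = eager(x, adj)
    _, mu_b, lv_b = fused(x, adj)
    assert torch.allclose(mu_a, mu_b, atol=1e-6)
    assert torch.allclose(lv_a, lv_b, atol=1e-6)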
peterfeifanchen/scGNN
GCNModelVAE
false
16265
[ "MIT" ]
60
4ef9013ad0f44f9f51708e9bb60e5138f5706593
https://github.com/peterfeifanchen/scGNN/tree/4ef9013ad0f44f9f51708e9bb60e5138f5706593
Shared
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wq/cwqkfc7efcgiuv6rsa3stkinyzeft7fq5wl4uyfa53emahjnunte.py # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf4, 16, grid=grid(16), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.utils.data import torch.optim import torch.utils.data.distributed class Shared(torch.nn.Module): def __init__(self, args): super(Shared, self).__init__() ncha, self.size, _ = args.inputsize self.taskcla = args.taskcla self.latent_dim = args.latent_dim self.nhid = args.units self.nlayers = args.nlayers self.relu = torch.nn.ReLU() self.drop = torch.nn.Dropout(0.2) self.fc1 = torch.nn.Linear(ncha * self.size * self.size, self.nhid) if self.nlayers == 3: self.fc2 = torch.nn.Linear(self.nhid, self.nhid) self.fc3 = torch.nn.Linear(self.nhid, self.latent_dim) else: self.fc2 = torch.nn.Linear(self.nhid, self.latent_dim) def forward(self, x_s): h = x_s.view(x_s.size(0), -1) h = self.drop(self.relu(self.fc1(h))) h = self.drop(self.relu(self.fc2(h))) if self.nlayers == 3: h = self.drop(self.relu(self.fc3(h))) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'args': _mock_config(inputsize=[4, 4, 4], taskcla=4, latent_dim=4, units=4, nlayers=1)}]
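A dry-run sketch (mine): _paritybench_helpers._mock_config is just an attribute bag, so a SimpleNamespace with the same fields stands in for it here.

import torch
from types import SimpleNamespace

args = SimpleNamespace(inputsize=[4, 4, 4], taskcla=4, latent_dim=4,
    units=4, nlayers=1)
net = Shared(args)
h = net(torch.rand(4, 4, 4, 4))       # flattened to (4, 64) inside forward
print(h.shape)                        # torch.Size([4, 4])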
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3, primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf4, primals_4 class SharedNew(torch.nn.Module): def __init__(self, args): super(SharedNew, self).__init__() ncha, self.size, _ = args.inputsize self.taskcla = args.taskcla self.latent_dim = args.latent_dim self.nhid = args.units self.nlayers = args.nlayers self.relu = torch.nn.ReLU() self.drop = torch.nn.Dropout(0.2) self.fc1 = torch.nn.Linear(ncha * self.size * self.size, self.nhid) if self.nlayers == 3: self.fc2 = torch.nn.Linear(self.nhid, self.nhid) self.fc3 = torch.nn.Linear(self.nhid, self.latent_dim) else: self.fc2 = torch.nn.Linear(self.nhid, self.latent_dim) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5]) return output[0]
Prathyusha-Akundi/Adversarial-Continual-Learning
Shared
false
16266
[ "MIT" ]
237
edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
https://github.com/Prathyusha-Akundi/Adversarial-Continual-Learning/tree/edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
LearnedPositionalEncoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wl/cwlax6mkanfsay7ebbq7nkgrrbq6ie4csq26r47jh4dagnky6wnp.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 512), (2048, 512, 1)) assert_size_stride(primals_2, (4, 4, 4, 512), (8192, 2048, 512, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 32768, grid=grid(32768), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 512), (2048, 512, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 512), (8192, 2048, 512, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim class LearnedPositionalEncoding(nn.Module): def __init__(self, max_position_embeddings, embedding_dim, seq_length): super(LearnedPositionalEncoding, self).__init__() self.position_embeddings = nn.Parameter(torch.zeros(1, seq_length, 512) ) def forward(self, x, position_ids=None): position_embeddings = self.position_embeddings return x + position_embeddings def get_inputs(): return [torch.rand([4, 4, 4, 512])] def get_init_inputs(): return [[], {'max_position_embeddings': 4, 'embedding_dim': 4, 'seq_length': 4}]
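A shape sketch (added here, not from the repo): the parameter width is hard-coded to 512, so the (1, seq_length, 512) embedding broadcasts over any input whose trailing dims match, e.g. the (4, 4, 4, 512) tensor from get_inputs(); note the constructor's max_position_embeddings and embedding_dim arguments go unused.

import torch

pe = LearnedPositionalEncoding(max_position_embeddings=4, embedding_dim=4,
    seq_length=4)
x = torch.rand(4, 4, 4, 512)
out = pe(x)                           # x + position_embeddings
assert out.shape == x.shape           # broadcast over the two leading dims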
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 512), (2048, 512, 1)) assert_size_stride(primals_2, (4, 4, 4, 512), (8192, 2048, 512, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(32768)](primals_2, primals_1, buf0, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LearnedPositionalEncodingNew(nn.Module): def __init__(self, max_position_embeddings, embedding_dim, seq_length): super(LearnedPositionalEncodingNew, self).__init__() self.position_embeddings = nn.Parameter(torch.zeros(1, seq_length, 512) ) def forward(self, input_0): primals_1 = self.position_embeddings primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
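A hypothetical parity check (not in the record): the compiled add should equal the eager broadcast add; since position_embeddings is zero-initialized, a freshly built module must return the input unchanged. call() pins cuda:0, hence the gate.

import torch

if torch.cuda.is_available():
    pe = LearnedPositionalEncodingNew(4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 512, device='cuda')
    assert torch.allclose(pe(x), x + pe.position_embeddings)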
potpov/TransBTS
LearnedPositionalEncoding
false
16267
[ "Apache-2.0" ]
163
658de5f1dde17d25db54fb07adf49370cc32d7c3
https://github.com/potpov/TransBTS/tree/658de5f1dde17d25db54fb07adf49370cc32d7c3
Adder2D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zr/czriihulllg44xhaffhdwvpylq55lkftttdpdjcdqfcu3eujm5rf.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = 
xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/44/c44i7vnizkohfraswy2fybvcbu4j2tl54qnzscskv4lizxkoye7f.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sub, aten.abs, aten.sum] # Source node to ATen node mapping: # out => abs_1, sub, sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_6, %unsqueeze_7), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {}) triton_per_fused_abs_sub_sum_1 = async_compile.triton('triton_per_fused_abs_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = (xindex // 4) x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (4*r2)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/eh/ceh3drpczc5qvlpgea6jmkqhnvpfcv3u5sykd2ulnebudkpn4a6l.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # out_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: 
torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tmp1 = -tmp0 tl.store(out_ptr0 + (x1 + (4*y0)), tmp1, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.sub, aten.abs, aten.sum] triton_per_fused_abs_sub_sum_1.run(primals_1, buf0, buf1, 16, 64, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf2, 4, 4, grid=grid(4, 4), stream=stream0) del buf1 return (buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import math import torch import torch.nn as nn from torch.autograd.function import Function def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class Adder2D(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False, quantize=False, weight_bits=8, sparsity=0): super(Adder2D, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, x): output = adder2d_function(x, self.adder, self.stride, self.padding) if self.bias: output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4}]
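A numeric gloss (mine, not the repo's test): with kernel_size equal to the input size and no padding, unfold reduces to flattening each sample, so the layer output is exactly the negative L1 distance between each filter and each flattened input.

import torch

layer = Adder2D(input_channel=4, output_channel=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)                                   # (batch, filters, 1, 1)

# Same quantity computed directly from the adder definition above.
W_col = layer.adder.view(4, -1)                # (filters, 64)
X_col = x.reshape(4, -1).t()                   # (64, batch); == unfold here
ref = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1)
assert torch.allclose(y.view(4, 4), ref.t(), atol=1e-5)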
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.autograd import Function
import math
import torch.nn as nn
from torch.autograd.function import Function

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
                             YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask,
                   eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
                                   rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x1 = xindex // 4
    x0 = xindex % 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask,
                   eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2), xmask,
                   eviction_policy='evict_last', other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
                             YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tmp1 = -tmp0
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_per_fused_abs_sub_sum_1[grid(16)](primals_1, buf0, buf1, 16,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        del buf1
    return buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0)


def adder2d_function(X, W, stride=1, padding=0):
    n_filters, _d_filter, h_filter, w_filter = W.size()
    n_x, _d_x, h_x, w_x = X.size()
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter,
        dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out)
    X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
    W_col = W.view(n_filters, -1)
    out = adder.apply(W_col, X_col)
    out = out.view(n_filters, h_out, w_out, n_x)
    out = out.permute(3, 0, 1, 2).contiguous()
    return out


class adder(Function):

    @staticmethod
    def forward(ctx, W_col, X_col):
        ctx.save_for_backward(W_col, X_col)
        output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        W_col, X_col = ctx.saved_tensors
        grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) *
            grad_output.unsqueeze(1)).sum(2)
        grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12
            ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5
        grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) *
            grad_output.unsqueeze(1)).sum(0)
        return grad_W_col, grad_X_col


class Adder2DNew(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride=1,
                 padding=0, bias=False, quantize=False, weight_bits=8,
                 sparsity=0):
        super(Adder2DNew, self).__init__()
        self.stride = stride
        self.padding = padding
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel_size = kernel_size
        self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn(
            output_channel, input_channel, kernel_size, kernel_size)))
        self.bias = bias
        if bias:
            self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros(
                output_channel)))

    def forward(self, input_0):
        primals_1 = self.adder
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
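# Hypothetical smoke test, not part of the dump: after copying the weight
# tensor, the compiled wrapper should agree with the eager module up to float
# summation order. It needs a CUDA device, since call() pins buffers to cuda:0.
import torch

if torch.cuda.is_available():
    eager = Adder2D(4, 4, 4).cuda()
    compiled = Adder2DNew(4, 4, 4).cuda()
    with torch.no_grad():
        compiled.adder.copy_(eager.adder)  # share the filter bank
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(eager(x), compiled(x), atol=1e-4))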
poppin-mice/ShiftAddNet
Adder2D
false
16,268
[ "MIT" ]
55
a17369a50da5bba6250fdeac7c065bd00f293f3c
https://github.com/poppin-mice/ShiftAddNet/tree/a17369a50da5bba6250fdeac7c065bd00f293f3c
DeiTOutput
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   hidden_states_2 => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 256,
            grid=grid(256), stream=stream0)
        del primals_2
        del primals_4
    return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.utils.checkpoint class DeiTOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, hidden_dropout_prob=0.5)}]
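# A sketch of what the compiled graph above does (my decomposition, not repo
# code): flatten to 2D, one mm for the weight, then one fused kernel adding
# bias and residual; dropout disappears because the trace is an inference
# graph in which it is the identity.
import torch
from torch import nn

dense = nn.Linear(4, 4)
x, residual = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
flat = x.view(64, 4) @ dense.weight.t()              # extern_kernels.mm
out = flat.view(4, 4, 4, 4) + dense.bias + residual  # triton_poi_fused_add_0
print(torch.allclose(out, dense(x) + residual, atol=1e-6))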
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class DeiTOutputNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
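# Hedged parity check (assumes a CUDA device): since the compiled call()
# contains no dropout op, it only matches the eager module in eval mode.
# _mock_config is the helper already imported in the eager listing above.
import torch
from _paritybench_helpers import _mock_config

if torch.cuda.is_available():
    cfg = _mock_config(intermediate_size=4, hidden_size=4, hidden_dropout_prob=0.5)
    eager = DeiTOutput(cfg).cuda().eval()
    compiled = DeiTOutputNew(cfg).cuda()
    compiled.load_state_dict(eager.state_dict())
    h = torch.rand(4, 4, 4, 4, device='cuda')
    r = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(eager(h, r), compiled(h, r), atol=1e-5))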
jxhe/unify-parameter-efficient-tuning
DeiTOutput
false
16,269
[ "Apache-2.0" ]
101
3222ce2c0079566a28043e22380eb4ab6ad14389
https://github.com/jxhe/unify-parameter-efficient-tuning/tree/3222ce2c0079566a28043e22380eb4ab6ad14389
four_layer_conv
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/mm/cmm32x6e6zo4kvzt7wcved3womme4b2tnyjs4lykjdrbtircsvp3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 65536
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ge/cge7bnkkv3lovy5jtwqk5sazg4aouf273qc7u6jafiuctxmosb55.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024, 4096], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 1024
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (1048576*y1)), tmp0, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/p5/cp5f3ywbsz6yywuzmnyc62oqasgrxyr7eslb3ehdwfuid7crjbs6.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   x => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4194304],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4194304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/f5/cf5mfnk5qrjdz5hmwwrrwrtwup2dg4c4whg5igmssilhtdixftqw.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   conv2d_3 => convolution_3
#   x_3 => relu_3
# Graph fragment:
#   %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024, 4096], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 1024
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (1048576*y1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2 + (4096*y3)), tmp4, None)
    tl.store(out_ptr1 + (y0 + (256*x2) + (1048576*y1)), tmp6, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_2, (256, ), (1, ))
    assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256, ), (1, ))
    assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256, ), (1, ))
    assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_9, (256, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        stream0 = get_raw_stream(0)
        triton_poi_fused_0.run(primals_1, buf0, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_3, buf1, 1024, 4096, grid=grid(1024, 4096), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_0.run(primals_4, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_4
        buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_0.run(primals_6, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_6
        buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_0.run(primals_8, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
        del primals_8
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf6 = buf5; del buf5  # reuse
        # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf6, primals_2, 4194304, grid=grid(4194304), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf8 = buf7; del buf7  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf8, primals_5, 4194304, grid=grid(4194304), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf10 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf10, primals_7, 4194304, grid=grid(4194304), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf12 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.bool)
        # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
        triton_poi_fused_convolution_relu_threshold_backward_3.run(buf11,
            primals_9, buf12, buf13, 1024, 4096, grid=grid(1024, 4096), stream=stream0)
        del buf11
        del primals_9
    return (buf12, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4,
        primals_5, primals_6, primals_7, primals_8, primals_9])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch class four_layer_conv(torch.nn.Module): def __init__(self): super(four_layer_conv, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.fcn1 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn2 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn3 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn4 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) def forward(self, x): x = self.relu(self.fcn1(x)) x = self.relu(self.fcn2(x)) x = self.relu(self.fcn3(x)) x = self.relu(self.fcn4(x)) return x def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
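# An illustrative note (my reading, not from the repo): the two copy kernels in
# the compiled version below exist only to re-lay the weights and activations
# in channels-last order before the cudnn convolutions. Plain PyTorch produces
# the same layout with torch.channels_last:
import torch

x = torch.rand(4, 256, 64, 64).to(memory_format=torch.channels_last)
w = torch.rand(256, 256, 3, 3).to(memory_format=torch.channels_last)
print(x.stride())  # (1048576, 1, 16384, 256), the strides asserted for buf1
print(w.stride())  # (2304, 1, 768, 256), the strides built for buf0..buf4
y = torch.nn.functional.conv2d(x, w, padding=1)
print(y.is_contiguous(memory_format=torch.channels_last))  # True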
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
                       YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
                       YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
                                        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
        out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
        XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 256
    y1 = yindex // 256
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None,
                   eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp4, None)
    tl.store(out_ptr1 + (y0 + 256 * x2 + 1048576 * y1), tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(65536, 9)](primals_1, buf0, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256),
            torch.float32)
        triton_poi_fused_1[grid(1024, 4096)](primals_3, buf1, 1024, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_0[grid(65536, 9)](primals_4, buf2, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_0[grid(65536, 9)](primals_6, buf3, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_0[grid(65536, 9)](primals_8, buf4, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_2[grid(4194304)](buf6, primals_2,
            4194304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_relu_2[grid(4194304)](buf8, primals_5,
            4194304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf9 = extern_kernels.convolution(buf8, buf3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_relu_2[grid(4194304)](buf10, primals_7,
            4194304, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf12 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
            torch.float32)
        buf13 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_3[grid(1024, 4096)
            ](buf11, primals_9, buf12, buf13, 1024, 4096, XBLOCK=32,
            YBLOCK=32, num_warps=4, num_stages=1)
        del buf11
        del primals_9
    return buf12, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13


class four_layer_convNew(torch.nn.Module):

    def __init__(self):
        super(four_layer_convNew, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.fcn1 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn2 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn3 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.fcn4 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1)

    def forward(self, input_0):
        primals_1 = self.fcn1.weight
        primals_2 = self.fcn1.bias
        primals_4 = self.fcn2.weight
        primals_5 = self.fcn2.bias
        primals_6 = self.fcn3.weight
        primals_7 = self.fcn3.bias
        primals_8 = self.fcn4.weight
        primals_9 = self.fcn4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
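# Hedged smoke test (assumes CUDA; the activations total a few hundred MB at
# most): once the state dict is shared, the compiled stack should reproduce
# the eager one up to float accumulation order.
import torch

if torch.cuda.is_available():
    eager = four_layer_conv().cuda()
    compiled = four_layer_convNew().cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 256, 64, 64, device='cuda')
    with torch.no_grad():
        print(torch.allclose(eager(x), compiled(x), atol=1e-4))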
peckjon/detectorch
four_layer_conv
false
16,270
[ "Apache-2.0" ]
627
69d31250d79a72b12b7419638ef59163f833bbba
https://github.com/peckjon/detectorch/tree/69d31250d79a72b12b7419638ef59163f833bbba
ConcatSquashLinear
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/2l/c2lbebbusddknc6lqjzbw4dwlncsmj2czynpigxl6tvqeprnhehy.py
# Topologically Sorted Source Nodes: [gate, mul, ret], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
#   gate => sigmoid
#   mul => mul
#   ret => add
# Graph fragment:
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %sigmoid), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp4 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bias], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_6, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        del primals_6
        buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [gate, mul, ret], Original ATen: [aten.sigmoid, aten.mul, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_sigmoid_0.run(buf3, buf2, buf0, 256,
            grid=grid(256), stream=stream0)
    return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4,
        primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Linear import torch.utils.tensorboard class ConcatSquashLinear(Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinear, self).__init__() self._layer = Linear(dim_in, dim_out) self._hyper_bias = Linear(dim_ctx, dim_out, bias=False) self._hyper_gate = Linear(dim_ctx, dim_out) def forward(self, ctx, x): gate = torch.sigmoid(self._hyper_gate(ctx)) bias = self._hyper_bias(ctx) ret = self._layer(x) * gate + bias return ret def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4, 'dim_ctx': 4}]
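# A tiny numeric sketch (mine, not from the repo): the "concat-squash" layer is
# FiLM-style conditioning, ret = layer(x) * sigmoid(hyper_gate(ctx)) + hyper_bias(ctx),
# so the context modulates x without any explicit concatenation.
import torch

m = ConcatSquashLinear(dim_in=4, dim_out=4, dim_ctx=4)
ctx, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
manual = m._layer(x) * torch.sigmoid(m._hyper_gate(ctx)) + m._hyper_bias(ctx)
print(torch.allclose(m(ctx, x), manual))  # True: forward is exactly this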
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn import Linear import torch.utils.tensorboard assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_0[grid(256)](buf3, buf2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), buf2 class ConcatSquashLinearNew(Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinearNew, self).__init__() self._layer = Linear(dim_in, dim_out) self._hyper_bias = Linear(dim_ctx, dim_out, bias=False) self._hyper_gate = Linear(dim_ctx, dim_out) def forward(self, input_0, input_1): primals_1 = self._layer.weight primals_2 = self._layer.bias primals_4 = self._hyper_bias.weight primals_5 = self._hyper_gate.weight primals_6 = self._hyper_gate.bias primals_3 = input_0 primals_7 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
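# A hedged way to sanity-check the compiled kernel is to drive call() directly,
# so the binding of each primals slot is explicit. The slot roles below are my
# reading of the graph comments in the AOT listing (buf0, the "linear" node,
# feeds the sigmoid; buf2, "linear_2", is multiplied by the gate; buf1, "bias",
# is added), not something the dump states outright. Assumes a CUDA device.
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    dev = 'cuda'
    W_gate, b_gate = torch.rand(4, 4, device=dev), torch.rand(4, device=dev)
    W_bias = torch.rand(4, 4, device=dev)
    W_layer, b_layer = torch.rand(4, 4, device=dev), torch.rand(4, device=dev)
    ctx = torch.rand(4, 4, 4, 4, device=dev)
    x = torch.rand(4, 4, 4, 4, device=dev)
    out = call([W_gate, b_gate, ctx, W_bias, W_layer, b_layer, x])[0]
    ref = (F.linear(x, W_layer, b_layer)
           * torch.sigmoid(F.linear(ctx, W_gate, b_gate))
           + F.linear(ctx, W_bias))
    print(torch.allclose(out, ref, atol=1e-5))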
entc-17-fyp-05/diffusion-point-cloud
ConcatSquashLinear
false
16,271
[ "MIT" ]
138
cde2e501855dea31496ddffad16f40aa588e3af8
https://github.com/entc-17-fyp-05/diffusion-point-cloud/tree/cde2e501855dea31496ddffad16f40aa588e3af8
S2S2Mean
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4x/c4xw4n4kx5yde4nszp4f6ln4z73qjck6i5lwfua7xa5k7on7obs5.py # Topologically Sorted Source Nodes: [norm, clamp, e1, mul, sum_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul, aten.sum] # Source node to ATen node mapping: # clamp => clamp_min # e1 => div # mul => mul # norm => pow_1, pow_2, sum_1 # sum_1 => sum_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %clamp_min), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %select_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 
'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float64) tmp2 = tmp1 * tmp1 tmp4 = tmp3.to(tl.float64) tmp5 = tmp4 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tmp6 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = tl.full([1], 1e-05, tl.float64) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp1 / tmp13 tmp16 = tmp15.to(tl.float64) tmp17 = tmp14 * tmp16 tmp18 = tmp4 / tmp13 tmp20 = tmp19.to(tl.float64) tmp21 = tmp18 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tmp8 / tmp13 tmp25 = tmp24.to(tl.float64) tmp26 = tmp23 * tmp25 tmp27 = tmp22 + tmp26 tl.store(out_ptr0 + (x0), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/n3/cn3isdzkvtqu7qomxvqfxwiq6sglor72xgtgw7wcz33yzcyjfby7.py # Topologically Sorted Source Nodes: [norm, clamp, e1, mul, sum_1, mul_1, u2], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul, aten.sum, aten.sub] # Source node to ATen node mapping: # clamp => clamp_min # e1 => div # mul => mul # mul_1 => mul_1 # norm => pow_1, pow_2, sum_1 # sum_1 => sum_2 # u2 => sub # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %clamp_min), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %select_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %div), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %mul_1), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler 
import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: '*fp64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + x0 + (6*x1)), xmask) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (x0 + (6*x1)), xmask) tmp5 = tl.load(in_ptr0 + (6*x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (6*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (6*x1)), xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.float64) tmp4 = tmp3.to(tl.float64) tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp9 = tmp8.to(tl.float64) tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12.to(tl.float64) tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tl.full([1], 1e-05, tl.float64) tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp4 / tmp18 tmp20 = tmp2 * tmp19 tmp21 = tmp1 - tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ge/cgem2dvssnensibmv3ekms2p44vfkl3a4fq267xhv3hzg6ku2nev.py # Topologically Sorted Source Nodes: [norm, clamp, e1, norm_1, clamp_1, e2, e3], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.linalg_cross] # Source node to ATen node mapping: # clamp => clamp_min # clamp_1 => clamp_min_1 # e1 => div # e2 => div_1 # e3 => index, index_1, index_2, index_3, mul_2, mul_3 # norm => pow_1, pow_2, sum_1 # norm_1 => pow_3, pow_4, sum_3 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {}) # %div : [num_users=5] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%select, %clamp_min), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-05), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %clamp_min_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div, [None, %remainder]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_1, [None, %remainder_1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div, [None, %remainder_1]), kwargs = {}) # %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_1, [None, %remainder]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %index_3), kwargs = {}) triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2 = async_compile.triton('triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: '*fp64', 3: '*fp64', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) x2 = xindex tmp0 = tl.load(in_ptr0 + ((6*x1) + ((1 + x0) % 3)), xmask) tmp2 = tl.load(in_ptr0 + (6*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (6*x1)), xmask, eviction_policy='evict_last') tmp9 = 
tl.load(in_ptr0 + (2 + (6*x1)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + ((3*x1) + ((2 + x0) % 3)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3*x1), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (1 + (3*x1)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + ((6*x1) + ((2 + x0) % 3)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + ((3*x1) + ((1 + x0) % 3)), xmask) tmp1 = tmp0.to(tl.float64) tmp3 = tmp2.to(tl.float64) tmp4 = tmp3 * tmp3 tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp8 = tmp4 + tmp7 tmp10 = tmp9.to(tl.float64) tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = tl.full([1], 1e-05, tl.float64) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp1 / tmp15 tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = triton_helpers.maximum(tmp26, tmp14) tmp28 = tmp17 / tmp27 tmp29 = tmp16 * tmp28 tmp31 = tmp30.to(tl.float64) tmp32 = tmp31 / tmp15 tmp34 = tmp33 / tmp27 tmp35 = tmp32 * tmp34 tl.store(out_ptr0 + (x2), tmp29, xmask) tl.store(out_ptr1 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vo/cvozlzplax3nlmmikxejqstyxieq2nypdvo4yprphgt766n4nzil.py # Topologically Sorted Source Nodes: [stack, float_1], Original ATen: [aten.stack, aten._to_copy] # Source node to ATen node mapping: # float_1 => convert_element_type_1 # stack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1, %sub_1], 1), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_3, torch.float32), kwargs = {}) triton_poi_fused__to_copy_stack_3 = async_compile.triton('triton_poi_fused__to_copy_stack_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: '*fp64', 3: '*fp64', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK : tl.constexpr): 
xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 9 x1 = (xindex // 9) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((6*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp5.to(tl.float64) tmp7 = tl.load(in_ptr0 + (6*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tl.load(in_ptr0 + (1 + (6*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp10.to(tl.float64) tmp12 = tmp11 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.load(in_ptr0 + (2 + (6*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14.to(tl.float64) tmp16 = tmp15 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tl.full([1], 1e-05, tl.float64) tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp6 / tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tmp25 = tl.full([1], 6, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tmp24 & tmp26 tmp28 = tl.load(in_ptr1 + ((3*x1) + ((-3) + x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr1 + (3*x1), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr1 + (1 + (3*x1)), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tl.load(in_ptr1 + (2 + (3*x1)), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = libdevice.sqrt(tmp36) tmp38 = triton_helpers.maximum(tmp37, tmp19) tmp39 = tmp28 / tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp27, tmp39, tmp40) tmp42 = tmp0 >= tmp25 tmp43 = tl.full([1], 9, tl.int64) tmp44 = tmp0 < tmp43 tmp45 = tl.load(in_ptr2 + ((3*x1) + ((-6) + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tl.load(in_ptr3 + ((3*x1) + ((-6) + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp45 - tmp46 tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp42, tmp47, tmp48) tmp50 = tl.where(tmp27, tmp41, tmp49) tmp51 = tl.where(tmp4, tmp23, tmp50) tmp52 = tmp51.to(tl.float32) tl.store(out_ptr1 + (x2), tmp52, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 4), (4, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 1), (1, 64), torch.float64) # Topologically Sorted Source Nodes: [norm, clamp, e1, mul, sum_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float64) # Topologically Sorted Source Nodes: [norm, clamp, e1, mul, sum_1, mul_1, 
u2], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul, aten.sum, aten.sub] triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1.run(buf0, buf1, buf2, 192, grid=grid(192), stream=stream0) del buf1 buf3 = empty_strided_cuda((64, 3), (3, 1), torch.float64) buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float64) # Topologically Sorted Source Nodes: [norm, clamp, e1, norm_1, clamp_1, e2, e3], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.linalg_cross] triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2.run(buf0, buf2, buf3, buf4, 192, grid=grid(192), stream=stream0) buf6 = empty_strided_cuda((64, 3, 3), (9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [stack, float_1], Original ATen: [aten.stack, aten._to_copy] triton_poi_fused__to_copy_stack_3.run(buf0, buf2, buf3, buf4, buf6, 576, grid=grid(576), stream=stream0) del buf2 del buf3 del buf4 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn def s2s2_gram_schmidt(v1, v2): """Normalise 2 3-vectors. Project second to orthogonal component. Take cross product for third. Stack to form SO matrix.""" u1 = v1 e1 = u1 / u1.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) u2 = v2 - (e1 * v2).sum(-1, keepdim=True) * e1 e2 = u2 / u2.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) e3 = torch.cross(e1, e2) return torch.stack([e1, e2, e3], 1) class S2S2Mean(nn.Module): """Module to map R^6 -> SO(3) with S2S2 method.""" def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 6) self.map.weight.data.uniform_(-10, 10) self.map.bias.data.uniform_(-10, 10) def forward(self, x): v = self.map(x).double().view(-1, 2, 3) v1, v2 = v[:, 0], v[:, 1] return s2s2_gram_schmidt(v1, v2).float() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dims': 4}]
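A minimal sketch (editorial addition, not part of the dataset entry) verifying the property the s2s2_gram_schmidt docstring claims: the stacked rows form a special orthogonal matrix, i.e. R R^T ≈ I and det(R) ≈ +1. Batch size and tolerance are assumptions; recent torch versions may warn that torch.cross without an explicit dim is deprecated.

import torch

# Assumes s2s2_gram_schmidt from the entry above is in scope.
v1 = torch.randn(8, 3, dtype=torch.float64)
v2 = torch.randn(8, 3, dtype=torch.float64)
R = s2s2_gram_schmidt(v1, v2)  # (8, 3, 3); rows are e1, e2, e3
eye = torch.eye(3, dtype=torch.float64).expand(8, 3, 3)
assert torch.allclose(R @ R.transpose(-1, -2), eye, atol=1e-6)  # orthonormal rows
assert torch.allclose(torch.linalg.det(R), torch.ones(8, dtype=torch.float64), atol=1e-6)  # right-handed, det +1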
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float64) tmp2 = tmp1 * tmp1 tmp4 = tmp3.to(tl.float64) tmp5 = tmp4 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tmp6 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = tl.full([1], 1e-05, tl.float64) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp1 / tmp13 tmp16 = tmp15.to(tl.float64) tmp17 = tmp14 * tmp16 tmp18 = tmp4 / tmp13 tmp20 = tmp19.to(tl.float64) tmp21 = tmp18 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tmp8 / tmp13 tmp25 = tmp24.to(tl.float64) tmp26 = tmp23 * tmp25 tmp27 = tmp22 + tmp26 tl.store(out_ptr0 + x0, tmp27, xmask) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + x0 + 6 * x1), xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (x0 + 6 * x1), xmask) tmp5 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 6 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float64) tmp4 = tmp3.to(tl.float64) tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp9 = tmp8.to(tl.float64) tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12.to(tl.float64) tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tl.full([1], 1e-05, tl.float64) tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp4 / tmp18 tmp20 = tmp2 * tmp19 tmp21 = tmp1 - tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (6 * x1 + (1 + x0) % 3), xmask) tmp2 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 6 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 6 * 
x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + 3 * x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (1 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + (6 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (3 * x1 + (1 + x0) % 3), xmask) tmp1 = tmp0.to(tl.float64) tmp3 = tmp2.to(tl.float64) tmp4 = tmp3 * tmp3 tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp8 = tmp4 + tmp7 tmp10 = tmp9.to(tl.float64) tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = tl.full([1], 1e-05, tl.float64) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp1 / tmp15 tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = triton_helpers.maximum(tmp26, tmp14) tmp28 = tmp17 / tmp27 tmp29 = tmp16 * tmp28 tmp31 = tmp30.to(tl.float64) tmp32 = tmp31 / tmp15 tmp34 = tmp33 / tmp27 tmp35 = tmp32 * tmp34 tl.store(out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__to_copy_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 9 x1 = xindex // 9 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (6 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp5.to(tl.float64) tmp7 = tl.load(in_ptr0 + 6 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tl.load(in_ptr0 + (1 + 6 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp10.to(tl.float64) tmp12 = tmp11 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.load(in_ptr0 + (2 + 6 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp14.to(tl.float64) tmp16 = tmp15 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tl.full([1], 1e-05, tl.float64) tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp6 / tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tmp25 = tl.full([1], 6, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tmp24 & tmp26 tmp28 = tl.load(in_ptr1 + (3 * x1 + (-3 + x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr1 + 3 * x1, tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr1 + (1 + 3 * x1), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tl.load(in_ptr1 + (2 + 3 * x1), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = libdevice.sqrt(tmp36) tmp38 = triton_helpers.maximum(tmp37, tmp19) tmp39 = tmp28 / tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp27, tmp39, tmp40) tmp42 = tmp0 >= tmp25 tl.full([1], 9, tl.int64) tmp45 = tl.load(in_ptr2 + (3 * x1 + (-6 + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tl.load(in_ptr3 + (3 * x1 + (-6 + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp45 - tmp46 tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = 
tl.where(tmp42, tmp47, tmp48) tmp50 = tl.where(tmp27, tmp41, tmp49) tmp51 = tl.where(tmp4, tmp23, tmp50) tmp52 = tmp51.to(tl.float32) tl.store(out_ptr1 + x2, tmp52, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 4), (4, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 1), (1, 64), torch.float64) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float64) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1[grid(192)]( buf0, buf1, buf2, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 3), (3, 1), torch.float64) buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float64) triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2[grid(192) ](buf0, buf2, buf3, buf4, 192, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused__to_copy_stack_3[grid(576)](buf0, buf2, buf3, buf4, buf6, 576, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def s2s2_gram_schmidt(v1, v2): """Normalise 2 3-vectors. Project second to orthogonal component. Take cross product for third. Stack to form SO matrix.""" u1 = v1 e1 = u1 / u1.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) u2 = v2 - (e1 * v2).sum(-1, keepdim=True) * e1 e2 = u2 / u2.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) e3 = torch.cross(e1, e2) return torch.stack([e1, e2, e3], 1) class S2S2MeanNew(nn.Module): """Module to map R^6 -> SO(3) with S2S2 method.""" def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 6) self.map.weight.data.uniform_(-10, 10) self.map.bias.data.uniform_(-10, 10) def forward(self, input_0): primals_1 = self.map.weight primals_2 = self.map.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
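A minimal parity sketch (editorial; requires CUDA and assumes both classes above are in scope, tolerance is an assumption) comparing the eager S2S2Mean against the Inductor-compiled S2S2MeanNew:

import torch

m = S2S2Mean(4).cuda()
m_new = S2S2MeanNew(4).cuda()
m_new.load_state_dict(m.state_dict())  # share the same Linear weight/bias
x = torch.rand(4, 4, 4, 4, device='cuda')
# Both return the (64, 3, 3) stack of rotation matrices as float32.
assert torch.allclose(m(x), m_new(x), atol=1e-5)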
pimdh/lie-vae
S2S2Mean
false
16272
[ "MIT" ]
83
0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
https://github.com/pimdh/lie-vae/tree/0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
CrossLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mz/cmzimqelmzcu5pwdstsgeqeyhghmdormtjn7cedudos37hzsbjnd.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = 
tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2), xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(buf1, primals_4, primals_2, primals_3, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim class CrossLayer(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, x0, x): return self.dropout(x0 * self.linear(x)) + x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'dropout': 0.5}]
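A minimal sketch (editorial addition; assumes eval mode so Dropout is the identity) spelling out the DCN-style cross recurrence this layer implements, out = x0 * (W x + b) + x:

import torch

layer = CrossLayer(d=4, dropout=0.5).eval()  # eval() disables dropout
x0 = torch.rand(2, 4)
x = torch.rand(2, 4)
expected = x0 * (x @ layer.linear.weight.T + layer.linear.bias) + x
assert torch.allclose(layer(x0, x), expected, atol=1e-6)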
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_4, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class CrossLayerNew(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
ppmdatix/rtdl
CrossLayer
false
16,273
[ "Apache-2.0" ]
298
a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
https://github.com/ppmdatix/rtdl/tree/a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
InitConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rc/crcbvljomz4rhpd2d25pe5zidhn6m6igcqc5c2okorn3x27sgc57.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.bernoulli] # Source node to ATen node mapping: # y_1 => bernoulli # Graph fragment: # %bernoulli : [num_users=2] = call_function[target=torch.ops.aten.bernoulli.p](args = (%empty, 0.8), kwargs = {}) triton_poi_fused_bernoulli_0 = async_compile.triton('triton_poi_fused_bernoulli_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bernoulli_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bernoulli_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = float("nan") tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q7/cq7iulydceioc26k22xuz3efipl37ginn756i4i4mi2luwjx6xxm.py # Topologically Sorted Source Nodes: [y_1], Original 
ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # y_1 => div, mul # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%bernoulli, 0.8), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %div), kwargs = {}) triton_poi_fused_div_mul_1 = async_compile.triton('triton_poi_fused_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 1.25 tmp5 = tmp3 * tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.bernoulli] stream0 = get_raw_stream(0) triton_poi_fused_bernoulli_0.run(buf2, 16, grid=grid(16), stream=stream0) torch.ops.aten.bernoulli_.float(buf2, 0.8) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.div, aten.mul] 
triton_poi_fused_div_mul_1.run(buf4, primals_2, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (reinterpret_tensor(buf4, (16, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class InitConv(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConv, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, x): y = self.conv(x) y = F.dropout3d(y, self.dropout) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
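A minimal sketch (editorial; seed and shapes are assumptions) of the inverted-dropout behaviour behind the fused kernels in this entry: F.dropout3d zeroes whole channels and rescales survivors by 1/(1 - p), which is where the 1.25 constant in triton_poi_fused_div_mul_1 comes from for p = 0.2:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
y = torch.ones(1, 16, 4, 4, 4)
out = F.dropout3d(y, 0.2, training=True)
survivors = out[out != 0]
# Every surviving element equals 1 / (1 - 0.2) = 1.25; dropped channels are all-zero.
assert torch.allclose(survivors, torch.full_like(survivors, 1.25))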
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_bernoulli_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = float('nan') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 1.25 tmp5 = tmp3 * tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_bernoulli_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) torch.ops.aten.bernoulli_.float(buf2, 0.8) buf4 = buf0 del buf0 triton_poi_fused_div_mul_1[grid(1024)](buf4, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf4, (16, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class InitConvNew(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConvNew, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
potpov/TransBTS
InitConv
false
16,274
[ "Apache-2.0" ]
163
658de5f1dde17d25db54fb07adf49370cc32d7c3
https://github.com/potpov/TransBTS/tree/658de5f1dde17d25db54fb07adf49370cc32d7c3
SimpleNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hr/chrb4easzia4dw753qymwrkwdvp5554y3k2pc6zt3dtjbxgoihzj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3h/c3h4j2ijkg2jblud277lnet2ev5gdk6lyerwb36365pdquknq43k.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_2 => tanh_2 # Graph fragment: # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64), (64, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (1, 64), (64, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 
= get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 64), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf7, primals_9, 4096, grid=grid(4096), stream=stream0) del primals_9 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf9) del primals_11 return (reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SimpleNet(nn.Module): def __init__(self, ni): super().__init__() self.linear1 = nn.Linear(ni, 128) self.linear2 = nn.Linear(128, 128) self.linear3 = nn.Linear(128, 64) self.linear4 = nn.Linear(64, 64) self.linear5 = nn.Linear(64, 1) def forward(self, x): x = F.tanh(self.linear1(x)) x = F.tanh(self.linear2(x)) x = F.tanh(self.linear3(x)) x = F.tanh(self.linear4(x)) x = self.linear5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ni': 4}]
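A minimal usage sketch (editorial; shapes follow get_inputs above). Each nn.Linear acts on the trailing dimension, so the 4-feature axis is mapped through 128 -> 128 -> 64 -> 64 -> 1 while the leading dims pass through unchanged; note F.tanh is a deprecated alias of torch.tanh and may emit a warning:

import torch

net = SimpleNet(ni=4)
y = net(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 1)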
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64), (64, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1, 64), (64, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(8192)](buf3, primals_5, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 triton_poi_fused_tanh_1[grid(4096)](buf5, primals_7, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 64), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 
64, 1), 0) del buf6 triton_poi_fused_tanh_1[grid(4096)](buf7, primals_9, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 1), (1, 64), 0 ), alpha=1, beta=1, out=buf9) del primals_11 return reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4 class SimpleNetNew(nn.Module): def __init__(self, ni): super().__init__() self.linear1 = nn.Linear(ni, 128) self.linear2 = nn.Linear(128, 128) self.linear3 = nn.Linear(128, 64) self.linear4 = nn.Linear(64, 64) self.linear5 = nn.Linear(64, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_8 = self.linear4.weight primals_9 = self.linear4.bias primals_10 = self.linear5.weight primals_11 = self.linear5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
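A minimal smoke test for the lowered module above; illustrative only, assuming a CUDA device (the shapes mirror the assert_size_stride guards in call):
import torch

# Illustrative sketch, not part of the dataset record: exercise SimpleNetNew
# end to end on the shapes the compiled graph was captured with.
if torch.cuda.is_available():
    net = SimpleNetNew(ni=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')  # matches the (4, 4, 4, 4) guard on primals_3
    y = net(x)
    assert y.shape == (4, 4, 4, 1)  # linear5 maps 64 features down to 1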
repo_name: pranjukn/AI-Feynman
module_name: SimpleNet
synthetic: false
uuid: 16275
licenses: ["MIT"]
stars: 470
sha: 92e67b01fc2b00ed6ebcacc67edf6122b4219ac7
repo_link: https://github.com/pranjukn/AI-Feynman/tree/92e67b01fc2b00ed6ebcacc67edf6122b4219ac7
entry_point: AlphaChooser
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/w6/cw6ti22sggrzsdr3ecwip4bwdpmtdk6viu5egnnemagot4sbotbr.py # Topologically Sorted Source Nodes: [sigmoid, alpha, clamp], Original ATen: [aten.sigmoid, aten.add, aten.clamp] # Source node to ATen node mapping: # alpha => add # clamp => clamp_max, clamp_min # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 1.01), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) triton_poi_fused_add_clamp_sigmoid_0 = async_compile.triton('triton_poi_fused_add_clamp_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 1.01 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 2.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, alpha, clamp], Original ATen: [aten.sigmoid, aten.add, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_sigmoid_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0) return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class AlphaChooser(torch.nn.Module): """ It manages the alpha values in alpha-entmax function. """ def __init__(self, head_count): super(AlphaChooser, self).__init__() self.pre_alpha = nn.Parameter(torch.randn(head_count)) def forward(self): alpha = 1 + torch.sigmoid(self.pre_alpha) return torch.clamp(alpha, min=1.01, max=2) def get_inputs(): return [] def get_init_inputs(): return [[], {'head_count': 4}]
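A tiny worked example of the mapping in forward() above (illustrative): sigmoid keeps values in (0, 1), adding 1 shifts them into (1, 2), and the clamp pins the result to [1.01, 2].
import torch

# Illustrative only: trace the alpha mapping for a few pre_alpha values.
pre_alpha = torch.tensor([-10.0, 0.0, 10.0])
alpha = torch.clamp(1 + torch.sigmoid(pre_alpha), min=1.01, max=2)
# -> roughly [1.0100, 1.5000, 1.9999...]: the clamp floor binds for very
#    negative inputs, while large positive inputs approach 2 from below.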
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 1.01 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 2.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_sigmoid_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf0, primals_1 class AlphaChooserNew(torch.nn.Module): """ It manages the alpha values in alpha-entmax function. """ def __init__(self, head_count): super(AlphaChooserNew, self).__init__() self.pre_alpha = nn.Parameter(torch.randn(head_count)) def forward(self): primals_1 = self.pre_alpha output = call([primals_1]) return output[0]
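A hedged parity check (illustrative; assumes CUDA and that both class definitions above are in scope): the fused sigmoid/add/clamp kernel should match the eager forward.
import torch

# Illustrative sketch: copy the eager parameter into the lowered module and
# compare the two forward paths.
if torch.cuda.is_available():
    eager = AlphaChooser(head_count=4).cuda()
    lowered = AlphaChooserNew(head_count=4).cuda()
    lowered.load_state_dict(eager.state_dict())  # share the same pre_alpha
    torch.testing.assert_close(eager(), lowered())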
repo_name: prajjwal1/fluence2
module_name: AlphaChooser
synthetic: false
uuid: 16276
licenses: ["Apache-2.0"]
stars: 64
sha: f7353f4947ac4712ecd1df34e97df27d83060f13
repo_link: https://github.com/prajjwal1/fluence2/tree/f7353f4947ac4712ecd1df34e97df27d83060f13
entry_point: GatedConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/q7/cq7qwv755rskgi3fxmqbrnzfm6sxg6uprg2cozcqvgaiyr3e5jdv.py # Topologically Sorted Source Nodes: [x_var_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_var_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cw/ccwnc6zu7wtvdgqeok62nd3zwn7koktxp4ll4fb6nwg4q32fihtc.py # Topologically Sorted Source Nodes: [sigmoid, out_1], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # out_1 => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (64 + x0 + (128*x1)), xmask) tmp2 = tl.load(in_ptr0 + (x0 + (128*x1)), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + (x2), tmp1, xmask) tl.store(out_ptr1 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x_var_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_var_1], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 512, grid=grid(512), stream=stream0) del primals_3 buf2 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, out_1], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_1.run(buf1, buf2, buf3, 256, grid=grid(256), stream=stream0) return (buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4, 4, 4), (128, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.init as init class GatedConv(nn.Module): """GatedConv.""" def __init__(self, input_size, width=3, dropout=0.2, nopad=False): """init.""" super(GatedConv, self).__init__() self.conv = nn.Conv2d(in_channels=input_size, out_channels=2 * input_size, kernel_size=(width, 1), stride=(1, 1), padding=( width // 2 * (1 - nopad), 0)) init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout)) ** 0.5) self.dropout = nn.Dropout(dropout) def forward(self, x_var): """forward.""" x_var = self.dropout(x_var) x_var = self.conv(x_var) out, gate = x_var.split(int(x_var.size(1) / 2), 1) out = out * torch.sigmoid(gate) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
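An illustrative sketch of the gating step inside forward() above: the convolution doubles the channel count, the first half carries the content, and the second half gates it elementwise through a sigmoid.
import torch

# Illustrative only: a stand-in for the conv output with 2 * input_size = 8 channels.
conv_out = torch.rand(4, 8, 4, 4)
out, gate = conv_out.split(conv_out.size(1) // 2, dim=1)  # two (4, 4, 4, 4) halves
gated = out * torch.sigmoid(gate)                         # same shape as the module's output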
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask) tmp2 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(512)](buf1, primals_3, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4, 4, 4 ), (128, 16, 4, 1), 0), buf2 class GatedConvNew(nn.Module): """GatedConv.""" def __init__(self, input_size, width=3, dropout=0.2, nopad=False): """init.""" super(GatedConvNew, self).__init__() self.conv = nn.Conv2d(in_channels=input_size, out_channels=2 * input_size, kernel_size=(width, 1), stride=(1, 1), padding=( width // 2 * (1 - nopad), 0)) init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout)) ** 0.5) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
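A possible parity check (illustrative; assumes CUDA). The lowered graph was captured without a dropout op, so the eager module must be in eval() mode for the comparison to be meaningful.
import torch

# Illustrative sketch: same weights, dropout disabled, identical inputs.
if torch.cuda.is_available():
    eager = GatedConv(input_size=4).cuda().eval()    # eval() makes dropout a no-op
    lowered = GatedConvNew(input_size=4).cuda().eval()
    lowered.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), lowered(x))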
repo_name: pppku/SVS_system
module_name: GatedConv
synthetic: false
uuid: 16277
licenses: ["Apache-2.0"]
stars: 78
sha: 95ef1076c51bfc0b74349b8058a9c918ff24c500
repo_link: https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
entry_point: FFN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/37/c37iajzamvg4r3s5ikb4y6kka2x3towdlz4bqoh3dx4uywvya2mb.py # Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # relu => relu # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tr/ctrdeeo45yfmpbksxog7is2d6fd26mv2poki6u26emzhamo2zqxd.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_4 => add # x_5 => clone, var_mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/px/cpxbmtafvoqnd5j3oyskd4thxpat5nbj25jgagf6an6xgvaf47sv.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_4 => add # x_5 => add_1, add_2, clone, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 4), (64, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu] 
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0) del buf5 del buf6 del primals_7 return (buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16, 1), (16, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch as t class Conv(nn.Module): """Convolution Module.""" def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, w_init='linear'): """init.""" super(Conv, self).__init__() self.conv = nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) nn.init.xavier_uniform_(self.conv.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): """forward.""" x = self.conv(x) return x class FFN(nn.Module): """Positionwise Feed-Forward Network.""" def __init__(self, num_hidden): """:param num_hidden: dimension of hidden.""" super(FFN, self).__init__() self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init= 'relu') self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1) self.dropout = nn.Dropout(p=0.1) self.layer_norm = nn.LayerNorm(num_hidden) def forward(self, input_): """forward.""" x = input_.transpose(1, 2) x = self.w_2(t.relu(self.w_1(x))) x = x.transpose(1, 2) x = x + input_ x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_hidden': 4}]
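A shape walk-through of FFN.forward above with num_hidden=4 (illustrative):
import torch

# Illustrative only: follow the tensor shapes through the block.
x = torch.rand(4, 4, 4)       # (batch, time, hidden)
h = x.transpose(1, 2)         # (4, 4, 4): Conv1d wants channels first
# w_1 (Conv1d 4 -> 16, k=1): (4, 16, 4); relu; w_2 (Conv1d 16 -> 4, k=1): (4, 4, 4)
# transpose back, add the residual x, then LayerNorm over the last dim -> (4, 4, 4)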
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 4), (64, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf5 del buf6 del primals_7 return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class Conv(nn.Module): """Convolution Module.""" def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, w_init='linear'): """init.""" super(Conv, self).__init__() self.conv = nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) nn.init.xavier_uniform_(self.conv.weight, gain=nn.init. 
calculate_gain(w_init)) def forward(self, x): """forward.""" x = self.conv(x) return x class FFNNew(nn.Module): """Positionwise Feed-Forward Network.""" def __init__(self, num_hidden): """:param num_hidden: dimension of hidden.""" super(FFNNew, self).__init__() self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init= 'relu') self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1) self.dropout = nn.Dropout(p=0.1) self.layer_norm = nn.LayerNorm(num_hidden) def forward(self, input_0): primals_2 = self.w_1.conv.weight primals_3 = self.w_1.conv.bias primals_4 = self.w_2.conv.weight primals_5 = self.w_2.conv.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
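A hedged parity check for the FFN lowering (illustrative; assumes CUDA and both classes above in scope). The fused layer-norm reduction may differ from eager in the last few ulps, so a small explicit tolerance is used.
import torch

# Illustrative sketch: compare eager FFN against the inductor-lowered FFNNew.
if torch.cuda.is_available():
    eager = FFN(num_hidden=4).cuda().eval()
    lowered = FFNNew(num_hidden=4).cuda().eval()
    lowered.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), lowered(x), rtol=1e-4, atol=1e-5)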
repo_name: pppku/SVS_system
module_name: FFN
synthetic: false
uuid: 16278
licenses: ["Apache-2.0"]
stars: 78
sha: 95ef1076c51bfc0b74349b8058a9c918ff24c500
repo_link: https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
entry_point: visual_context
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/57/c57elstawbcsybwi7or6ajdx6ljjj4wh376i2dzqpymiql66f5w4.py # Topologically Sorted Source Nodes: [visual_feature], Original ATen: [aten._adaptive_avg_pool2d] # Source node to ATen node mapping: # visual_feature => _adaptive_avg_pool2d # Graph fragment: # %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%permute, [4, 1]), kwargs = {}) triton_poi_fused__adaptive_avg_pool2d_0 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = 
tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 16), torch.float32) # Topologically Sorted Source Nodes: [visual_feature], Original ATen: [aten._adaptive_avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class visual_context(nn.Module): def __init__(self): super(visual_context, self).__init__() self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) def forward(self, visual_feature): visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2)) visual_feature = visual_feature.squeeze(3) return visual_feature def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
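An illustrative shape trace of the pooling in forward() above: the permute moves the last dim into channel position, and AdaptiveAvgPool2d((None, 1)) averages away only the final spatial dim.
import torch
import torch.nn as nn

# Illustrative only: the eager data path, step by step.
x = torch.rand(4, 4, 4, 4)                    # (B, H, W, C)
p = x.permute(0, 3, 1, 2)                     # (B, C, H, W)
pooled = nn.AdaptiveAvgPool2d((None, 1))(p)   # (4, 4, 4, 1): mean over the last dim
out = pooled.squeeze(3)                       # (4, 4, 4)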
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 16), torch.float32) get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), class visual_contextNew(nn.Module): def __init__(self): super(visual_contextNew, self).__init__() self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
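A hedged equivalence check (illustrative; assumes CUDA): the single fused kernel should reproduce eager permute + adaptive pooling + squeeze.
import torch

# Illustrative sketch: the module is parameter-free, so only the input matters.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = visual_context()(x)
    out = visual_contextNew()(x)
    assert ref.shape == out.shape == (4, 4, 4)
    torch.testing.assert_close(ref, out)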
prabhatrmishra/IDCardInfoExtr
visual_context
false
16,279
[ "Apache-2.0" ]
66
c59270f61a3251a6aff55bc7d81f2057c4663a37
https://github.com/prabhatrmishra/IDCardInfoExtr/tree/c59270f61a3251a6aff55bc7d81f2057c4663a37
DispConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # inputs => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nl/cnlc6yjkbua5jkop4rrww37vaigeaa5rgiz5dbh7hjclxg6xrxjb.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # out => convolution # out_1 => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, 
grid=grid(576), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class DispConv(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(DispConv, self).__init__()
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.sigmod = nn.Sigmoid()

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.sigmod(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # ReflectionPad2d(1): mirror-indexes each 4x4 plane into a 6x6 buffer.
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused bias-add + sigmoid, applied in place to the convolution output.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x3, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_sigmoid_1[grid(256)](buf2, primals_3,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class DispConvNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(DispConvNew, self).__init__()
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.sigmod = nn.Sigmoid()

    def forward(self, input_0):
        primals_2 = self.conv.conv.weight
        primals_3 = self.conv.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
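# Editor's note (added, not part of the source record): a hedged equivalence
# sketch, assuming CUDA and that DispConv and DispConvNew above are in scope.
# DispConvNew keeps DispConv's parameter layout (conv.conv.weight and
# conv.conv.bias), so copying the state_dict should make the compiled
# pad -> conv -> sigmoid path reproduce the eager module; the helper name is
# hypothetical.
def _check_disp_conv():
    import torch
    ref = DispConv(4, 4).cuda()
    new = DispConvNew(4, 4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(new(x), ref(x), rtol=1e-5, atol=1e-5)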
prstrive/EPCDepth
DispConv
false
16,280
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
CosineAngularLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ip/cip7kjk6morvr63plpiexkjsx3sa2qcofrlqgaq2reellwiei74l.py # Topologically Sorted Source Nodes: [preds_norm, truths_norm, mul], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # mul => mul # preds_norm => div # truths_norm => div_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %expand_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x3), xmask) tmp17 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qi/cqioreumxsivhebibrcg6acibdf4dbzlpizkm7w27xzrxtkqbxwa.py # Topologically Sorted Source Nodes: [sum_1, neg, loss], Original ATen: [aten.sum, aten.neg, aten.mean] # Source node to ATen node mapping: # loss => mean # neg => neg # sum_1 => sum_3 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) triton_per_fused_mean_neg_sum_1 = async_compile.triton('triton_per_fused_mean_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_neg_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = -tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 64.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [preds_norm, truths_norm, mul], Original ATen: [aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sum_1, neg, loss], Original ATen: [aten.sum, aten.neg, aten.mean] triton_per_fused_mean_neg_sum_1.run(buf2, buf0, 1, 64, grid=grid(1), stream=stream0) del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel


class CosineAngularLoss(nn.Module):

    def __init__(self):
        super(CosineAngularLoss, self).__init__()

    def forward(self, preds, truths):
        preds_norm = torch.nn.functional.normalize(preds, p=2, dim=1)
        truths_norm = torch.nn.functional.normalize(truths, p=2, dim=1)
        loss = torch.mean(-torch.sum(preds_norm * truths_norm, dim=1))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # L2-normalizes both inputs along dim 1 (norms clamped at 1e-12) and
    # multiplies the normalized tensors elementwise.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + x3, xmask)
    tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = triton_helpers.maximum(tmp28, tmp13)
    tmp30 = tmp16 / tmp29
    tmp31 = tmp15 * tmp30
    tl.store(out_ptr0 + x3, tmp31, xmask)


@triton.jit
def triton_per_fused_mean_neg_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Sums over dim 1, negates, and averages the 64 remaining elements. The
    # two bare expressions below are vestiges of index/mask computations
    # whose results are unused for this single-program grid.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = -tmp6
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
    tmp10 = tl.sum(tmp8, 1)[:, None]
    tmp11 = 64.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_mean_neg_sum_1[grid(1)](buf2, buf0, 1, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
    return buf2,


class CosineAngularLossNew(nn.Module):

    def __init__(self):
        super(CosineAngularLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
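# Editor's note (added, not part of the source record): a small sanity
# sketch, assuming CUDA and that both loss classes above are in scope. The
# 1e-12 clamp in the fused kernel mirrors the default eps of
# torch.nn.functional.normalize, so the two paths should agree; the helper
# name is hypothetical.
def _check_cosine_angular_loss():
    import torch
    preds = torch.rand(4, 4, 4, 4, device='cuda')
    truths = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(CosineAngularLossNew()(preds, truths),
                               CosineAngularLoss()(preds, truths))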
princeton-vl/oasis
CosineAngularLoss
false
16,281
[ "BSD-3-Clause" ]
59
5835d24c331d78e91becba29f7e4a53ccd3e376e
https://github.com/princeton-vl/oasis/tree/5835d24c331d78e91becba29f7e4a53ccd3e376e
InfoLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qc/cqcysqz7schtteiiyh7rr3atkueb323eoynf6a2bkwwlu2tyowbm.py # Topologically Sorted Source Nodes: [x, add, log, mul, x_1, neg_entropy, add_1], Original ATen: [aten.mean, aten.add, aten.log, aten.mul, aten.div, aten.sum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # log => log # mul => mul # neg_entropy => sum_1 # x => mean # x_1 => div # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %log), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.3862943611198906), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {}) triton_per_fused_add_div_log_mean_mul_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1e-08 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = 0.7213475204444817 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = 1.0 tmp19 = tmp17 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, add, log, mul, x_1, neg_entropy, add_1], Original ATen: [aten.mean, aten.add, aten.log, aten.mul, aten.div, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class InfoLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        x = torch.mean(x, 0)
        logN = math.log(float(x.shape[0]))
        x = x * (x + eps).log() / logN
        neg_entropy = x.sum()
        return 1.0 + neg_entropy


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_log_mean_mul_sum_0(in_out_ptr0, in_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fuses the whole loss: mean over dim 0 of the four 64-element slices,
    # then x * log(x + eps) scaled by 1 / log(4) (the 0.7213... constant),
    # summed, plus 1. The two bare expressions below are vestiges of unused
    # index/mask computations.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr0 + (64 + r0), None)
    tmp3 = tl.load(in_ptr0 + (128 + r0), None)
    tmp5 = tl.load(in_ptr0 + (192 + r0), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1e-08
    tmp10 = tmp8 + tmp9
    tmp11 = tl_math.log(tmp10)
    tmp12 = tmp8 * tmp11
    tmp13 = 0.7213475204444817
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = 1.0
    tmp19 = tmp17 + tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_log_mean_mul_sum_0[grid(1)](buf1, arg0_1,
            1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class InfoLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
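# Editor's note (added, not part of the source record): the kernel folds the
# division by logN into a multiplication. For the (4, 4, 4, 4) test input,
# mean over dim 0 leaves shape (4, 4, 4), so logN = log(4) ~= 1.3862943611198906
# and 1 / logN ~= 0.7213475204444817, the constant baked into the kernel.
# A hedged parity sketch, assuming CUDA; the helper name is hypothetical.
def _check_info_loss():
    import math
    import torch
    assert abs(1.0 / math.log(4.0) - 0.7213475204444817) < 1e-12
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(InfoLossNew()(x), InfoLoss()(x))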
pudumagico/deepproblog
InfoLoss
false
16,282
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
EntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ub/cubencqemve35nkt4xxcdqc2sosummtob4vaprerhysth2rnyxvd.py # Topologically Sorted Source Nodes: [add, log, mul, x, neg_entropy, mean, neg], Original ATen: [aten.add, aten.log, aten.mul, aten.div, aten.sum, aten.mean, aten.neg] # Source node to ATen node mapping: # add => add # log => log # mean => mean # mul => mul # neg => neg # neg_entropy => sum_1 # x => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.3862943611198906), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_add_div_log_mean_mul_neg_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp7 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp19 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp1 = 1e-08 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 0.7213475204444817 tmp6 = tmp4 * tmp5 tmp8 = tmp7 + tmp1 tmp9 = tl_math.log(tmp8) tmp10 = tmp7 * tmp9 tmp11 = tmp10 * tmp5 tmp12 = tmp6 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 * tmp15 tmp17 = tmp16 * tmp5 tmp18 = tmp12 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tl_math.log(tmp20) tmp22 = tmp19 * tmp21 tmp23 = tmp22 * tmp5 tmp24 = tmp18 + tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tmp30 = -tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, log, mul, x, neg_entropy, mean, neg], Original ATen: [aten.add, aten.log, aten.mul, aten.div, aten.sum, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_neg_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class EntropyLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        logN = math.log(float(x.shape[0]))
        x = x * (x + eps).log() / logN
        neg_entropy = x.sum(1)
        return -neg_entropy.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_log_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Computes x * log(x + eps) / log(4) per element, sums the four dim-1
    # slices, averages the 64 remaining positions, and negates. The two bare
    # expressions below are vestiges of unused index/mask computations.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = tmp0 * tmp3
    tmp5 = 0.7213475204444817
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 + tmp1
    tmp9 = tl_math.log(tmp8)
    tmp10 = tmp7 * tmp9
    tmp11 = tmp10 * tmp5
    tmp12 = tmp6 + tmp11
    tmp14 = tmp13 + tmp1
    tmp15 = tl_math.log(tmp14)
    tmp16 = tmp13 * tmp15
    tmp17 = tmp16 * tmp5
    tmp18 = tmp12 + tmp17
    tmp20 = tmp19 + tmp1
    tmp21 = tl_math.log(tmp20)
    tmp22 = tmp19 * tmp21
    tmp23 = tmp22 * tmp5
    tmp24 = tmp18 + tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp28 = 64.0
    tmp29 = tmp27 / tmp28
    tmp30 = -tmp29
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_log_mean_mul_neg_sum_0[grid(1)](buf1,
            arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class EntropyLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
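# Editor's note (added, not part of the source record): the same 1 / log(4)
# constant appears here; the per-row sum over dim 1 and the final mean over
# the remaining 64 elements (the 64.0 divisor in the kernel) are fused into
# one persistent reduction. A hedged parity sketch, assuming CUDA; the
# helper name is hypothetical.
def _check_entropy_loss():
    import torch
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(EntropyLossNew()(x), EntropyLoss()(x))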
pudumagico/deepproblog
EntropyLoss
false
16,283
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
ConvElu
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rt/crt5evayewqskckkf6yefprh7yyouufj5el3llyi6gjwqnekhgdh.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/44/c44sqqvrdntbf2gicjehowtoxqpmh6xlq4sgt4azip2c7bw3hgcr.py # Topologically Sorted Source Nodes: [conv2d, xout], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # xout => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 432, grid=grid(432), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 3, 4, 4), (48, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d, xout], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 192, grid=grid(192), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ConvElu(nn.Module):

    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(ConvElu, self).__init__()
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate,
            dilation=1 * dirate, padding_mode='reflect')
        self.elu = nn.ELU(inplace=True)

    def forward(self, x):
        hx = x
        xout = self.elu(self.conv_s1(hx))
        return xout


def get_inputs():
    return [torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Reflection padding of 1: mirror-indexes each 4x4 plane into 6x6.
    xnumel = 432
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused bias-add + ELU (alpha = 1): expm1 on the non-positive branch.
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x3, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_3, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(432)](primals_1, buf0, 432,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 3, 4, 4), (48, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_elu_1[grid(192)](buf2, primals_3, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2


class ConvEluNew(nn.Module):

    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(ConvEluNew, self).__init__()
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate,
            dilation=1 * dirate, padding_mode='reflect')
        self.elu = nn.ELU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.conv_s1.weight
        primals_3 = self.conv_s1.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
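# Editor's note (added, not part of the source record): a hedged equivalence
# sketch, assuming CUDA and that ConvElu and ConvEluNew above are in scope.
# ConvEluNew reuses conv_s1's weight and bias, so a state_dict copy should
# make the compiled reflect-pad + conv + fused bias/ELU path match the eager
# module (Conv2d with padding_mode='reflect' is the same pad-then-conv
# pipeline); the helper name is hypothetical.
def _check_conv_elu():
    import torch
    ref = ConvElu().cuda()
    new = ConvEluNew().cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 3, 4, 4, device='cuda')
    torch.testing.assert_close(new(x), ref(x), rtol=1e-5, atol=1e-5)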
prstrive/EPCDepth
ConvElu
false
16,284
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
JSD
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/c2/cc2n7iuxwe7c4fifyjtjsmua6fdij2lesbujagykkpaoc52oniw5.py
# Topologically Sorted Source Nodes: [add_1, log_1, mul_1, x, sum_2, x_1, sub, y, add, log, mul, y_1, y_2, add_2], Original ATen: [aten.add, aten.log, aten.mul, aten.div, aten.sum, aten.mean, aten.rsub]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   add_2 => add_2
#   log => log
#   log_1 => log_1
#   mul => mul
#   mul_1 => mul_1
#   sub => sub
#   sum_2 => sum_2
#   x => div_1
#   x_1 => mean_1
#   y => mean
#   y_1 => div
#   y_2 => sum_1
# Graph fragment:
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
#   %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log_1), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 1.3862943611198906), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_1, [1]), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mean_1), kwargs = {})
#   %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %log), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.3862943611198906), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %sum_1), kwargs = {})
triton_per_fused_add_div_log_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = (rindex // 16)
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
    tmp7 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
    tmp13 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
    tmp19 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
    tmp28 = tl.load(in_ptr0 + (r2), None)
    tmp29 = tl.load(in_ptr0 + (64 + r2), None)
    tmp31 = tl.load(in_ptr0 + (128 + r2), None)
    tmp33 = tl.load(in_ptr0 + (192 + r2), None)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = tmp0 * tmp3
    tmp5 = 0.7213475204444817
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 + tmp1
    tmp9 = tl_math.log(tmp8)
    tmp10 = tmp7 * tmp9
    tmp11 = tmp10 * tmp5
    tmp12 = tmp6 + tmp11
    tmp14 = tmp13 + tmp1
    tmp15 = tl_math.log(tmp14)
    tmp16 = tmp13 * tmp15
    tmp17 = tmp16 * tmp5
    tmp18 = tmp12 + tmp17
    tmp20 = tmp19 + tmp1
    tmp21 = tl_math.log(tmp20)
    tmp22 = tmp19 * tmp21
    tmp23 = tmp22 * tmp5
    tmp24 = tmp18 + tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp34 = tmp32 + tmp33
    tmp35 = 4.0
    tmp36 = tmp34 / tmp35
    tmp37 = tmp36 + tmp1
    tmp38 = tl_math.log(tmp37)
    tmp39 = tmp36 * tmp38
    tmp40 = tmp39 * tmp5
    tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
    tmp43 = tl.sum(tmp41, 1)[:, None]
    tmp44 = 64.0
    tmp45 = tmp27 / tmp44
    tmp46 = 1.0
    tmp47 = tmp46 - tmp45
    tmp48 = tmp47 + tmp43
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp48, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [add_1, log_1, mul_1, x, sum_2, x_1, sub, y, add, log, mul, y_1, y_2, add_2], Original ATen: [aten.add, aten.log, aten.mul, aten.div, aten.sum, aten.mean, aten.rsub]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_log_mean_mul_rsub_sum_0.run(buf2, arg0_1, 1, 64, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class JSD(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        logN = math.log(float(x.shape[0]))
        y = torch.mean(x, 0)
        y = y * (y + eps).log() / logN
        y = y.sum()
        x = x * (x + eps).log() / logN
        x = x.sum(1).mean()
        return 1.0 - x + y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_log_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp28 = tl.load(in_ptr0 + r2, None)
    tmp29 = tl.load(in_ptr0 + (64 + r2), None)
    tmp31 = tl.load(in_ptr0 + (128 + r2), None)
    tmp33 = tl.load(in_ptr0 + (192 + r2), None)
    tmp1 = 1e-08
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = tmp0 * tmp3
    tmp5 = 0.7213475204444817
    tmp6 = tmp4 * tmp5
    tmp8 = tmp7 + tmp1
    tmp9 = tl_math.log(tmp8)
    tmp10 = tmp7 * tmp9
    tmp11 = tmp10 * tmp5
    tmp12 = tmp6 + tmp11
    tmp14 = tmp13 + tmp1
    tmp15 = tl_math.log(tmp14)
    tmp16 = tmp13 * tmp15
    tmp17 = tmp16 * tmp5
    tmp18 = tmp12 + tmp17
    tmp20 = tmp19 + tmp1
    tmp21 = tl_math.log(tmp20)
    tmp22 = tmp19 * tmp21
    tmp23 = tmp22 * tmp5
    tmp24 = tmp18 + tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp34 = tmp32 + tmp33
    tmp35 = 4.0
    tmp36 = tmp34 / tmp35
    tmp37 = tmp36 + tmp1
    tmp38 = tl_math.log(tmp37)
    tmp39 = tmp36 * tmp38
    tmp40 = tmp39 * tmp5
    tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
    tmp43 = tl.sum(tmp41, 1)[:, None]
    tmp44 = 64.0
    tmp45 = tmp27 / tmp44
    tmp46 = 1.0
    tmp47 = tmp46 - tmp45
    tmp48 = tmp47 + tmp43
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_log_mean_mul_rsub_sum_0[grid(1)](buf2, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf2,


class JSDNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
pudumagico/deepproblog
JSD
false
16,285
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
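An observation about the hard-coded multiplier in the fused JSD kernel above (added note, not source text): the eager module divides by logN = math.log(x.shape[0]) with x.shape[0] == 4, and Inductor folds that division into a multiplication by the reciprocal:

import math

# 0.7213475204444817 in the kernel is 1 / ln(4); 1.3862943611198906 in the
# graph fragment is ln(4) itself.
assert math.isclose(1 / math.log(4.0), 0.7213475204444817)
assert math.isclose(math.log(4.0), 1.3862943611198906)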
Highway
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/ii/ciitic7gw336jmjrzv6vnyt7nggf2elteacsju5rdvniywfl7erg.py
# Topologically Sorted Source Nodes: [H, T, mul, sub, mul_1, add], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
#   H => relu
#   T => sigmoid
#   add => add
#   mul => mul
#   mul_1 => mul_1
#   sub => sub
# Graph fragment:
#   %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %sigmoid), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp3 = tl.load(in_ptr1 + (x0), xmask)
    tmp6 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = tmp2 * tmp4
    tmp7 = 1.0
    tmp8 = tmp7 - tmp4
    tmp9 = tmp6 * tmp8
    tmp10 = tmp5 + tmp9
    tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [H, T, mul, sub, mul_1, add], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf0, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
    return (buf2, primals_3, buf0, buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Highway(nn.Module):

    def __init__(self, in_size, out_size):
        super(Highway, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        H = self.relu(self.H(inputs))
        T = self.sigmoid(self.T(inputs))
        return H * T + inputs * (1.0 - T)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'out_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp6 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = tl.sigmoid(tmp3)
    tmp5 = tmp2 * tmp4
    tmp7 = 1.0
    tmp8 = tmp7 - tmp4
    tmp9 = tmp6 * tmp8
    tmp10 = tmp5 + tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf0, buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2, primals_3, buf0, buf1


class HighwayNew(nn.Module):

    def __init__(self, in_size, out_size):
        super(HighwayNew, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.H.weight
        primals_2 = self.H.bias
        primals_4 = self.T.weight
        primals_5 = self.T.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
puppyapple/tacotron_pytorch
Highway
false
16,286
[ "MIT" ]
278
800bf8b0538c91f1104e99d8e7c1b645bb6154d3
https://github.com/puppyapple/tacotron_pytorch/tree/800bf8b0538c91f1104e99d8e7c1b645bb6154d3
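A behavioral sketch for the Highway record above (hypothetical, not from the source repo; it exercises the eager class on CPU): because T.bias is initialized to -1, a fresh layer gates mostly toward the identity path, and zeroing the weights isolates that effect exactly:

import torch

layer = Highway(4, 4)
with torch.no_grad():
    layer.H.weight.zero_()  # makes H(x) == relu(0) == 0 everywhere
    layer.T.weight.zero_()  # makes the gate exactly sigmoid(bias)
x = torch.rand(4, 4, 4, 4)
gate = torch.sigmoid(torch.tensor(-1.0))  # ~0.2689 from the -1 bias init
assert torch.allclose(layer(x), x * (1.0 - gate))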
UpConv
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/rc/crcgvn2t5cupsa3uzyzlz4vj5mtkxy2mopv5gmyjpcsh5voeodtr.py
# Topologically Sorted Source Nodes: [out, inputs], Original ATen: [aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
#   inputs => _unsafe_index_1, _unsafe_index_2
#   out => _unsafe_index
# Graph fragment:
#   %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, %sub_1, None]), kwargs = {})
#   %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_1, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused__unsafe_index_reflection_pad2d_0 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 10) % 10
    x0 = xindex % 10
    x2 = (xindex // 100)
    x5 = xindex
    tmp0 = 7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = 7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x5), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/cr/ccrfebtywtkn5ct3gnhnmnfdgmakzatlpfhdiithbltpuxxs3veo.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   out_1 => convolution
#   out_2 => expm1, gt, mul_4, mul_6, where
# Graph fragment:
#   %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
#   %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_4,), kwargs = {})
#   %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_4, %mul_6), kwargs = {})
triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 64) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out, inputs], Original ATen: [aten._unsafe_index, aten.reflection_pad2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused__unsafe_index_reflection_pad2d_0.run(primals_1, buf0, 1600, grid=grid(1600), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0)
        del primals_3
    return (buf2, primals_2, buf0, buf2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class UpConv(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(UpConv, self).__init__()
        self.upsamp = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.elu = nn.ELU(inplace=True)

    def forward(self, inputs):
        out = self.upsamp(inputs)
        out = self.conv(out)
        out = self.elu(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 10 % 10
    x0 = xindex % 10
    x2 = xindex // 100
    x5 = xindex
    tmp0 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1))
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0))
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x5, tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x3, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(1600)](primals_1, buf0, 1600, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_elu_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class UpConvNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(UpConvNew, self).__init__()
        self.upsamp = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.elu = nn.ELU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.conv.conv.weight
        primals_3 = self.conv.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
prstrive/EPCDepth
UpConv
false
16,287
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
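A hypothetical shape walkthrough for the UpConv record above (added commentary, not dataset content), showing why the fused index/pad kernel materializes a (4, 4, 10, 10) intermediate (buf0) and the convolution an 8x8 output (buf1):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
up = nn.UpsamplingNearest2d(scale_factor=2)(x)  # (4, 4, 8, 8): nearest 2x
pad = nn.ReflectionPad2d(1)(up)                 # (4, 4, 10, 10), like buf0
out = nn.Conv2d(4, 4, 3)(pad)                   # (4, 4, 8, 8), like buf1
assert out.shape == (4, 4, 8, 8)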
SoftDiceLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/wq/cwq5uygy7kcq5ujfxxfxcjhwrohlqk4d23fuusip7buftbecgoz2.py
# Topologically Sorted Source Nodes: [mul, numerator, mul_1, mul_2, add, denominator, truediv], Original ATen: [aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
#   add => add
#   denominator => sum_2
#   mul => mul
#   mul_1 => mul_1
#   mul_2 => mul_2
#   numerator => sum_1
#   truediv => div
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_div_mul_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tmp0 * tmp0
    tmp7 = tmp1 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
    tmp12 = tmp5 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [mul, numerator, mul_1, mul_2, add, denominator, truediv], Original ATen: [aten.mul, aten.sum, aten.add, aten.div]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_mul_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn.modules.loss import _Loss


class SoftDiceLoss(_Loss):

    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(SoftDiceLoss, self).__init__(size_average, reduce, reduction)

    def forward(self, y_pred, y_gt):
        numerator = torch.sum(y_pred * y_gt)
        denominator = torch.sum(y_pred * y_pred + y_gt * y_gt)
        return numerator / denominator


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tmp0 * tmp0
    tmp7 = tmp1 * tmp1
    tmp8 = tmp6 + tmp7
    tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
    tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
    tmp12 = tmp5 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class SoftDiceLossNew(_Loss):

    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(SoftDiceLossNew, self).__init__(size_average, reduce, reduction)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
purbayankar/pytorch-UNet
SoftDiceLoss
false
16,288
[ "MIT" ]
91
63183199b1cf4e23a37869d30fc335e484c0c0fe
https://github.com/purbayankar/pytorch-UNet/tree/63183199b1cf4e23a37869d30fc335e484c0c0fe
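A minimal eager restatement of what the single persistent-reduction kernel above computes (an illustrative sketch, not source text): one pass over all 256 elements produces sum(p*g) / sum(p*p + g*g):

import torch

p = torch.rand(4, 4, 4, 4)
g = torch.rand(4, 4, 4, 4)
dice = (p * g).sum() / (p * p + g * g).sum()  # the scalar written to buf2
assert torch.allclose(dice, SoftDiceLoss()(p, g))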
Attention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/p7/cp7xzeyl6japtnkojqx5iupjksot3nuocbambsy2o3yflsevkl5j.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   contiguous_2 => clone_2
# Graph fragment:
#   %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4, 16],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   attn_2 => exp
# Graph fragment:
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp1
    tmp16 = tl_math.exp(tmp15)
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   attn_2 => div_1, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/jq/cjqs74o7upz4nudmv2dbjbmggtmuj2mktvc7gygosrcj2d2xxghd.py
# Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   result_3 => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %view_14], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = (xindex // 8)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x1 + (16*((-4) + x0))), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/t2/ct2fj4cxakdevxwd5upea4iyfznuislybj5p4wd6jgtx5ayzurnk.py
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
#   result_5 => add
#   result_6 => var_mean
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_16, %primals_2), kwargs = {})
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + (x0), tmp16, xmask)
    tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/6g/c6gddvf7hivp3xdh4pyazhygzjkdnh5sxyn6itmcverzcfqnfwwt.py
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
#   result_5 => add
#   result_6 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_16, %primals_2), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x2), xmask)
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 8), (8, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, ), (1, ))
    assert_size_stride(primals_9, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(buf2, buf3, 4, 16, grid=grid(4, 16), stream=stream0)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0);
del buf2 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf0, buf4, 4, 16, grid=grid(4, 16), stream=stream0) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(primals_2, buf9, buf10, 128, grid=grid(128), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(buf11, primals_2, buf12, buf13, 16, grid=grid(16), stream=stream0) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, grid=grid(64), stream=stream0) del buf12 del buf13 del primals_9 return (buf14, reinterpret_tensor(buf7, (4, 4, 4, 4), (16, 64, 4, 1), 0), primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = 
rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
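The two `_softmax` kernels in this record implement the standard numerically stable split: `triton_poi_fused__softmax_1` computes `exp(x - rowmax)` over rows of length 4, and `triton_poi_fused__softmax_2` then divides by the row sum. A minimal eager sketch of the same two-step computation, assuming a last-dimension softmax on a CUDA tensor:

import torch

x = torch.rand(16, 4, 4, device='cuda')
# step 1: subtract the row max before exponentiating (what _softmax_1 fuses)
numer = torch.exp(x - x.amax(dim=-1, keepdim=True))
# step 2: normalize by the row sum (what _softmax_2 fuses)
out = numer / numer.sum(dim=-1, keepdim=True)
assert torch.allclose(out, torch.softmax(x, dim=-1), atol=1e-6)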
import math import torch from torch import nn from torch.nn import Linear import torch as t from torch.autograd import Variable class MultiheadAttention(nn.Module): """Multihead attention mechanism (dot attention).""" def __init__(self, num_hidden_k): """:param num_hidden_k: dimension of hidden.""" super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None, gaussian_factor=None): """forward.""" attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if gaussian_factor is not None: attn = attn - gaussian_factor if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class Attention(nn.Module): """Attention Network.""" def __init__(self, num_hidden, h=4, local_gaussian=False): """init.""" super(Attention, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.local_gaussian = local_gaussian if local_gaussian: self.local_gaussian_factor = Variable(t.tensor(30.0), requires_grad=True).float() else: self.local_gaussian_factor = None self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, memory, decoder_input, mask=None, query_mask=None): """forward.""" batch_size = memory.size(0) seq_k = memory.size(1) seq_q = decoder_input.size(1) if query_mask is not None: query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k) query_mask = query_mask.repeat(self.h, 1, 1) if mask is not None: mask = mask.repeat(self.h, 1, 1) key = self.key(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) value = self.value(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) query = self.query(decoder_input).view(batch_size, seq_q, self.h, self.num_hidden_per_attn) key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self. num_hidden_per_attn) value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self .num_hidden_per_attn) query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self .num_hidden_per_attn) if self.local_gaussian: row = t.arange(1, seq_k + 1).unsqueeze(0).unsqueeze(-1).repeat( batch_size * self.h, 1, seq_k) col = t.arange(1, seq_k + 1).unsqueeze(0).unsqueeze(0).repeat( batch_size * self.h, seq_k, 1) local_gaussian = t.pow(row - col, 2).float() self.local_gaussian_factor = self.local_gaussian_factor local_gaussian = local_gaussian / self.local_gaussian_factor else: local_gaussian = None result, attns = self.multihead(key, value, query, mask=mask, query_mask=query_mask, gaussian_factor=local_gaussian) attns = attns.view(self.h, batch_size, seq_q, seq_k) attns = attns.permute(1, 0, 2, 3) result = result.view(self.h, batch_size, seq_q, self. 
num_hidden_per_attn) result = result.permute(1, 2, 0, 3).contiguous().view(batch_size, seq_q, -1) result = t.cat([decoder_input, result], dim=-1) result = self.final_linear(result) result = result + decoder_input result = self.layer_norm_1(result) return result, attns class Linear(nn.Module): """Linear Module.""" def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """init.""" super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): """forward.""" return self.linear_layer(x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn from torch.nn import Linear import torch as t from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_9 return buf14, reinterpret_tensor(buf7, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0 ), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) class MultiheadAttention(nn.Module): """Multihead attention mechanism (dot attention).""" def __init__(self, num_hidden_k): """:param num_hidden_k: dimension of hidden.""" super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None, gaussian_factor=None): """forward.""" attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if gaussian_factor is not None: attn = attn - gaussian_factor if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = 
t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class AttentionNew(nn.Module): """Attention Network.""" def __init__(self, num_hidden, h=4, local_gaussian=False): """init.""" super(AttentionNew, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.local_gaussian = local_gaussian if local_gaussian: self.local_gaussian_factor = Variable(t.tensor(30.0), requires_grad=True).float() else: self.local_gaussian_factor = None self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, input_0, input_1): primals_3 = self.key.linear_layer.weight primals_4 = self.value.linear_layer.weight primals_5 = self.query.linear_layer.weight primals_6 = self.final_linear.linear_layer.weight primals_7 = self.final_linear.linear_layer.bias primals_8 = self.layer_norm_1.weight primals_9 = self.layer_norm_1.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1] class Linear(nn.Module): """Linear Module.""" def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """init.""" super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): """forward.""" return self.linear_layer(x)
pppku/SVS_system
Attention
false
16290
[ "Apache-2.0" ]
78
95ef1076c51bfc0b74349b8058a9c918ff24c500
https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
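A practical way to validate a compiled wrapper such as `AttentionNew` against the eager `Attention` module is to copy the state dict across and compare outputs. A minimal sketch, assuming a CUDA device and that both classes from this record are in scope (the dropout layers defined in `__init__` are never applied in `forward`, so no train/eval toggling is needed):

import torch

torch.manual_seed(0)
eager = Attention(num_hidden=4).cuda()
compiled = AttentionNew(num_hidden=4).cuda()
compiled.load_state_dict(eager.state_dict())  # parameter names are identical in both classes

memory = torch.rand(4, 4, 4, device='cuda')
decoder_input = torch.rand(4, 4, 4, device='cuda')
out_e, attn_e = eager(memory, decoder_input)
out_c, attn_c = compiled(memory, decoder_input)
assert torch.allclose(out_e, out_c, atol=1e-5)
assert torch.allclose(attn_e, attn_c, atol=1e-5)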
BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iq/ciqftckmgqckpktocc4vnzvzxloichfo465idr3x42oonjq74tp7.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) 
tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [alignment], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) return (reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class BahdanauAttention(nn.Module): def __init__(self, dim): super(BahdanauAttention, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, query, processed_memory): """ Args: query: (batch, 1, dim) or (batch, dim) processed_memory: (batch, max_time, dim) """ if query.dim() == 2: query = query.unsqueeze(1) processed_query = self.query_layer(query) alignment = self.v(self.tanh(processed_query + processed_memory)) return alignment.squeeze(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
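The eager module above computes additive (Bahdanau) scores as `v · tanh(W_q·query + processed_memory)`, reduced to one scalar per position. A worked check of that formula against the module, assuming a CUDA device and the `BahdanauAttention` class from this record:

import torch

att = BahdanauAttention(dim=4).cuda()
query = torch.rand(4, 4, 4, 4, device='cuda')
memory = torch.rand(4, 4, 4, 4, device='cuda')
# alignment = tanh(W_q q + m) @ v^T, then drop the trailing singleton dim
ref = (torch.tanh(att.query_layer(query) + memory) @ att.v.weight.t()).squeeze(-1)
assert torch.allclose(att(query, memory), ref, atol=1e-6)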
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BahdanauAttentionNew(nn.Module): def __init__(self, dim): super(BahdanauAttentionNew, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, input_0, input_1): primals_2 = self.query_layer.weight primals_4 = self.v.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
puppyapple/tacotron_pytorch
BahdanauAttention
false
16292
[ "MIT" ]
278
800bf8b0538c91f1104e99d8e7c1b645bb6154d3
https://github.com/puppyapple/tacotron_pytorch/tree/800bf8b0538c91f1104e99d8e7c1b645bb6154d3
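The single Triton kernel in this record fuses the add and tanh in place on its first argument. It can be exercised directly and checked against the eager expression it replaces. A minimal sketch, assuming a CUDA device and that the `triton_poi_fused_add_tanh_0` kernel and the `grid` helper from this record are in scope:

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
expected = torch.tanh(a + b)
buf = a.clone()  # the kernel mutates in_out_ptr0 in place
triton_poi_fused_add_tanh_0[grid(256)](buf, b, 256, XBLOCK=256, num_warps=4, num_stages=1)
assert torch.allclose(buf, expected, atol=1e-6)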
ClassWisePool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wj/cwj6mseepwwsl64zrhfvnzd4gkux6dcys7drtkhi2dsgv2axk6do.py # Topologically Sorted Source Nodes: [output, truediv], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # output => sum_1 # truediv => div # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [2]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {}) triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) 
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, truediv], Original ATen: [aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import sys from torch.autograd import Function import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class ClassWisePoolFunction(Function): @staticmethod def forward(ctx, input, num_maps): batch_size, num_channels, h, w = input.size() if num_channels % num_maps != 0: None sys.exit(-1) num_outputs = int(num_channels / num_maps) x = input.view(batch_size, num_outputs, num_maps, h, w) output = torch.sum(x, 2) ctx.save_for_backward(input) ctx.num_maps = num_maps return output.view(batch_size, num_outputs, h, w) / num_maps @staticmethod def backward(ctx, grad_output): input, = ctx.saved_tensors batch_size, num_channels, h, w = input.size() num_outputs = grad_output.size(1) grad_input = grad_output.view(batch_size, num_outputs, 1, h, w).expand( batch_size, num_outputs, ctx.num_maps, h, w).contiguous() return grad_input.view(batch_size, num_channels, h, w), None class ClassWisePool(nn.Module): def __init__(self, num_maps): super(ClassWisePool, self).__init__() self.num_maps = num_maps def forward(self, input): return ClassWisePoolFunction.apply(input, self.num_maps) def __repr__(self): return self.__class__.__name__ + ' (num_maps={num_maps})'.format( num_maps=self.num_maps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_maps': 4}]
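`ClassWisePoolFunction` averages each consecutive group of `num_maps` channels into one output channel (the bare `None` statement before `sys.exit(-1)` is a harmless no-op, evidently a stripped error message in the source). Note also that `backward` expands `grad_output` without dividing by `num_maps`, so gradients come back scaled by `num_maps` relative to the analytic derivative of the mean computed in `forward`. A minimal sketch that makes both behaviors visible, assuming a CUDA device:

import torch

x = torch.rand(2, 4, 3, 3, device='cuda', requires_grad=True)
out = ClassWisePoolFunction.apply(x, 2)           # (2, 2, 3, 3): mean over channel pairs
ref = x.detach().view(2, 2, 2, 3, 3).mean(dim=2)  # same grouping done with a plain view
assert torch.allclose(out, ref, atol=1e-6)
out.sum().backward()
# the analytic gradient of a mean over 2 maps would be 0.5 per element;
# this custom backward yields 1.0 because it never divides by num_maps
assert torch.allclose(x.grad, torch.ones_like(x))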
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import sys from torch.autograd import Function import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ClassWisePoolFunction(Function): @staticmethod def forward(ctx, input, num_maps): batch_size, num_channels, h, w = input.size() if num_channels % num_maps != 0: None sys.exit(-1) num_outputs = int(num_channels / num_maps) x = input.view(batch_size, num_outputs, num_maps, h, w) output = torch.sum(x, 2) ctx.save_for_backward(input) ctx.num_maps = num_maps return output.view(batch_size, num_outputs, h, w) / num_maps @staticmethod def backward(ctx, grad_output): input, = ctx.saved_tensors batch_size, num_channels, h, w = input.size() num_outputs = grad_output.size(1) grad_input = grad_output.view(batch_size, num_outputs, 1, h, w).expand( batch_size, num_outputs, ctx.num_maps, h, w).contiguous() return grad_input.view(batch_size, num_channels, h, w), None class ClassWisePoolNew(nn.Module): def __init__(self, num_maps): super(ClassWisePoolNew, self).__init__() self.num_maps = num_maps def __repr__(self): return self.__class__.__name__ + ' (num_maps={num_maps})'.format( num_maps=self.num_maps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pyushkevich/wildcat.pytorch
ClassWisePool
false
16293
[ "MIT" ]
273
2046cde4e4a350eb1172fe60035448aa8df632d5
https://github.com/pyushkevich/wildcat.pytorch/tree/2046cde4e4a350eb1172fe60035448aa8df632d5
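For the compiled path, the fused kernel is specialized to the `(4, 4, 4, 4)` input with `num_maps=4`, collapsing all four channels into one output channel with weight 0.25. A minimal parity check, assuming a CUDA device and the `ClassWisePoolNew` wrapper from this record:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
pool = ClassWisePoolNew(num_maps=4).cuda()
out = pool(x)
assert out.shape == (4, 1, 4, 4)
# with num_maps == num_channels, the pool reduces to a plain channel mean
assert torch.allclose(out, x.mean(dim=1, keepdim=True), atol=1e-6)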
BertLayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dg/cdgw6x7nju4bzp2wyuwgeanbco7zcjis6yiusovvnpz6zw3yjd3l.py # Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # sub => sub # u => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), 
xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k3/ck3awyjmlyoxvkizg2opx6vtglv26uioox7nr33aabc2cmbcxgpr.py # Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # pow_1 => pow_1 # s => mean_1 # sqrt => sqrt # x => div # x_1 => add_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_pow_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [u, sub], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s, add, sqrt, x, mul, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_pow_sqrt_1.run(primals_1, buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0) del buf0 del primals_1 del primals_3 return (buf1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.onnx.utils import torch.random import torch.cuda.random import torch.utils.cpp_extension class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): do_convert = self.weight.dtype != x.dtype if do_convert: x = x u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) x = self.weight * x + self.bias if do_convert: x = x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.onnx.utils import torch.random import torch.cuda.random import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_2, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_1, buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_3 return buf1, primals_2 class BertLayerNormNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
nict-wisdom/rannc
BertLayerNorm
false
16,294
[ "MIT" ]
45
a1708807e053e2d58b7f6d6ed925f03aa8504416
https://github.com/nict-wisdom/rannc/tree/a1708807e053e2d58b7f6d6ed925f03aa8504416
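A minimal standalone check, not part of the record above, showing that the biased-variance normalization BertLayerNorm computes agrees with torch.nn.functional.layer_norm for the same eps (tensor shapes and tolerance are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
eps = 1e-12
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)   # biased variance, as in the module
ours = (x - u) / torch.sqrt(s + eps)        # weight=1, bias=0 case
ref = F.layer_norm(x, (4,), eps=eps)
print(torch.allclose(ours, ref, atol=1e-5))  # expected: True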
ReLUDropout
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/z2/cz2rxhcm4pthct27emcnkzayyc4jm2tl6sekj3gkbml4p26f24mx.py # Topologically Sorted Source Nodes: [clamp_], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp_ => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr1 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [clamp_], Original ATen: [aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0) return (arg0_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint


def relu_dropout(x, p=0, training=False, variational=False, batch_first=False):
    if not training or p == 0:
        return x.clamp_(min=0)
    p1m = 1 - p
    if variational:
        if batch_first:
            mask = torch.rand_like(x[:, 0, :]) > p1m
            mask = mask.unsqueeze(1).repeat(1, x.size(1), 1)
        else:
            mask = torch.rand_like(x[0]) > p1m
            mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1)
    else:
        mask = torch.rand_like(x) > p1m
    mask |= x < 0
    return x.masked_fill_(mask, 0).div_(p1m)


class ReLUDropout(torch.nn.Dropout):

    def __init__(self, p=0.5, variational=False, batch_first=False,
            inplace=False):
        super().__init__(p, inplace=True)
        self.variational = variational
        self.batch_first = batch_first

    def forward(self, input):
        return relu_dropout(input, p=self.p, training=self.training,
            variational=self.variational, batch_first=self.batch_first)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tl.store(out_ptr1 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return arg0_1,


def relu_dropout(x, p=0, training=False, variational=False, batch_first=False):
    if not training or p == 0:
        return x.clamp_(min=0)
    p1m = 1 - p
    if variational:
        if batch_first:
            mask = torch.rand_like(x[:, 0, :]) > p1m
            mask = mask.unsqueeze(1).repeat(1, x.size(1), 1)
        else:
            mask = torch.rand_like(x[0]) > p1m
            mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1)
    else:
        mask = torch.rand_like(x) > p1m
    mask |= x < 0
    return x.masked_fill_(mask, 0).div_(p1m)


class ReLUDropoutNew(torch.nn.Dropout):

    def __init__(self, p=0.5, variational=False, batch_first=False,
            inplace=False):
        super().__init__(p, inplace=True)
        self.variational = variational
        self.batch_first = batch_first

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
quanpn90/NMTGMinor
ReLUDropout
false
16,295
[ "MIT" ]
75
0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
https://github.com/quanpn90/NMTGMinor/tree/0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
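A small sketch, not from the record above, of the two regimes of relu_dropout: in eval mode it reduces to an in-place ReLU, and in training mode survivors are rescaled by 1/(1-p) so the expected activation matches relu(x) elementwise (variable names and sizes are local to this sketch):

import torch

p = 0.25
x = torch.randn(8, 16, 32)

# Eval mode (training=False): just an in-place clamp at 0.
y_eval = x.clone().clamp_(min=0)

# Training mode: rand > (1 - p) is True with probability p, so each positive
# activation is zeroed with probability p, then rescaled by 1/(1 - p).
mask = torch.rand_like(x) > (1 - p)
mask |= x < 0
y_train = x.clone().masked_fill_(mask, 0).div_(1 - p)
print(y_eval.mean().item(), y_train.mean().item())  # close for large tensors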
Invertible1x1Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/o7/co7osyxs2cvh2l5heuqokn4jvtjd7nknmgqdhgapquyibl2quplf.py # Topologically Sorted Source Nodes: [double], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # double => convert_element_type # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_1, torch.float64), kwargs = {}) triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.float64) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/d3/cd3v4q3hd6rwt3uo2dduyq2musziw3mh6evdf7hcgki6a7y4nyi5.py # Topologically Sorted Source Nodes: [W, W_1, z_], Original ATen: [aten._to_copy, aten.view, aten.convolution] # Source node to ATen node mapping: # W => convert_element_type_1 # W_1 => view # z_ => convolution # Graph fragment: # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%getitem, torch.float32), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_1, [4, 4, 1, 1]), kwargs = {}) # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %view, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused__to_copy_convolution_view_1 = async_compile.triton('triton_poi_fused__to_copy_convolution_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp64', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_convolution_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_convolution_view_1(in_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr1 + (y0 + (4*x1)), tmp1, xmask & ymask) tl.store(out_ptr2 + (x1 + (4*y0)), tmp1, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cw/ccwsgl6yoa3pudtor33tywa2icywstpphcn75af7tre2bhikx6rs.py # Topologically Sorted Source Nodes: [log_det, mul, log_det_1], Original ATen: [aten.neg, aten.mul] # Source node to ATen node mapping: # log_det => neg # log_det_1 => mul_1 # mul => mul # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, 4), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {}) triton_poi_fused_mul_neg_2 = async_compile.triton('triton_poi_fused_mul_neg_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_neg_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_neg_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -tmp1 tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tl.store(in_out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (1, 4)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float64) # Topologically Sorted Source Nodes: [double], Original ATen: [aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [double, inverse], Original ATen: [aten._to_copy, aten.linalg_inv_ex] buf1 = torch.ops.aten.linalg_inv_ex.default(buf0) del buf0 buf2 = buf1[0] del buf1 buf5 = empty_strided_cuda((4, 4, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [W, W_1, z_], Original ATen: [aten._to_copy, aten.view, aten.convolution] triton_poi_fused__to_copy_convolution_view_1.run(buf2, buf5, buf11, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [slogdet], Original ATen: [aten._linalg_slogdet] buf6 = torch.ops.aten._linalg_slogdet.default(primals_1) del primals_1 buf8 = buf6[1] buf9 = buf6[2] buf10 = buf6[3] del buf6 # Topologically Sorted Source Nodes: [z_], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(primals_2, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 
4, 1)) del buf11 buf13 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [log_det, mul, log_det_1], Original ATen: [aten.neg, aten.mul] triton_poi_fused_mul_neg_2.run(buf13, 1, grid=grid(1), stream=stream0) return (buf12, buf13, primals_2, buf5, buf9, buf10, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (1, 4), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class Flow(nn.Module):
    """
    Generic class for flow functions
    """

    def __init__(self):
        super().__init__()

    def forward(self, z):
        """
        :param z: input variable, first dimension is batch dim
        :return: transformed z and log of absolute determinant
        """
        raise NotImplementedError('Forward pass has not been implemented.')

    def inverse(self, z):
        raise NotImplementedError('This flow has no algebraic inverse.')


class Invertible1x1Conv(Flow):
    """
    Invertible 1x1 convolution introduced in the Glow paper
    Assumes 4d input/output tensors of the form NCHW
    """

    def __init__(self, num_channels, use_lu=False):
        """
        Constructor
        :param num_channels: Number of channels of the data
        :param use_lu: Flag whether to parametrize weights through the LU decomposition
        """
        super().__init__()
        self.num_channels = num_channels
        self.use_lu = use_lu
        Q = torch.qr(torch.randn(self.num_channels, self.num_channels))[0]
        if use_lu:
            P, L, U = torch.lu_unpack(*Q.lu())
            self.register_buffer('P', P)
            self.L = nn.Parameter(L)
            S = U.diag()
            self.register_buffer('sign_S', torch.sign(S))
            self.log_S = nn.Parameter(torch.log(torch.abs(S)))
            self.U = nn.Parameter(torch.triu(U, diagonal=1))
            self.register_buffer('eye', torch.diag(torch.ones(self.num_channels)))
        else:
            self.W = nn.Parameter(Q)

    def _assemble_W(self, inverse=False):
        L = torch.tril(self.L, diagonal=-1) + self.eye
        U = torch.triu(self.U, diagonal=1) + torch.diag(self.sign_S *
            torch.exp(self.log_S))
        if inverse:
            if self.log_S.dtype == torch.float64:
                L_inv = torch.inverse(L)
                U_inv = torch.inverse(U)
            else:
                L_inv = torch.inverse(L.double()).type(self.log_S.dtype)
                U_inv = torch.inverse(U.double()).type(self.log_S.dtype)
            W = U_inv @ L_inv @ self.P.t()
        else:
            W = self.P @ L @ U
        return W

    def forward(self, z):
        if self.use_lu:
            W = self._assemble_W(inverse=True)
            log_det = -torch.sum(self.log_S)
        else:
            W_dtype = self.W.dtype
            if W_dtype == torch.float64:
                W = torch.inverse(self.W)
            else:
                W = torch.inverse(self.W.double()).type(W_dtype)
            W = W.view(*W.size(), 1, 1)
            log_det = -torch.slogdet(self.W)[1]
        W = W.view(self.num_channels, self.num_channels, 1, 1)
        z_ = torch.nn.functional.conv2d(z, W)
        log_det = log_det * z.size(2) * z.size(3)
        return z_, log_det

    def inverse(self, z):
        if self.use_lu:
            W = self._assemble_W()
            log_det = torch.sum(self.log_S)
        else:
            W = self.W
            log_det = torch.slogdet(self.W)[1]
        W = W.view(self.num_channels, self.num_channels, 1, 1)
        z_ = torch.nn.functional.conv2d(z, W)
        log_det = log_det * z.size(2) * z.size(3)
        return z_, log_det


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0.to(tl.float64)
    tl.store(out_ptr0 + x0, tmp1, xmask)


@triton.jit
def triton_poi_fused__to_copy_convolution_view_1(in_ptr0, out_ptr1,
    out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tmp1 = tmp0.to(tl.float32)
    tl.store(out_ptr1 + (y0 + 4 * x1), tmp1, xmask & ymask)
    tl.store(out_ptr2 + (x1 + 4 * y0), tmp1, xmask & ymask)


@triton.jit
def triton_poi_fused_mul_neg_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_out_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = -tmp1
    tmp3 = 4.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 * tmp3
    tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (1, 4))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float64)
        get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = torch.ops.aten.linalg_inv_ex.default(buf0)
        del buf0
        buf2 = buf1[0]
        del buf1
        buf5 = empty_strided_cuda((4, 4, 1, 1), (1, 4, 4, 4), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused__to_copy_convolution_view_1[grid(4, 4)](buf2, buf5,
            buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        buf6 = torch.ops.aten._linalg_slogdet.default(primals_1)
        del primals_1
        buf8 = buf6[1]
        buf9 = buf6[2]
        buf10 = buf6[3]
        del buf6
        buf12 = extern_kernels.convolution(primals_2, buf11, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
        del buf11
        buf13 = buf8
        del buf8
        triton_poi_fused_mul_neg_2[grid(1)](buf13, 1, XBLOCK=1, num_warps=1,
            num_stages=1)
    return buf12, buf13, primals_2, buf5, buf9, buf10, reinterpret_tensor(
        buf2, (4, 4), (4, 1), 0)


class Flow(nn.Module):
    """
    Generic class for flow functions
    """

    def __init__(self):
        super().__init__()

    def forward(self, z):
        """
        :param z: input variable, first dimension is batch dim
        :return: transformed z and log of absolute determinant
        """
        raise NotImplementedError('Forward pass has not been implemented.')

    def inverse(self, z):
        raise NotImplementedError('This flow has no algebraic inverse.')


class Invertible1x1ConvNew(Flow):
    """
    Invertible 1x1 convolution introduced in the Glow paper
    Assumes 4d input/output tensors of the form NCHW
    """

    def __init__(self, num_channels, use_lu=False):
        """
        Constructor
        :param num_channels: Number of channels of the data
        :param use_lu: Flag whether to parametrize weights through the LU decomposition
        """
        super().__init__()
        self.num_channels = num_channels
        self.use_lu = use_lu
        Q = torch.qr(torch.randn(self.num_channels, self.num_channels))[0]
        if use_lu:
            P, L, U = torch.lu_unpack(*Q.lu())
            self.register_buffer('P', P)
            self.L = nn.Parameter(L)
            S = U.diag()
            self.register_buffer('sign_S', torch.sign(S))
            self.log_S = nn.Parameter(torch.log(torch.abs(S)))
            self.U = nn.Parameter(torch.triu(U, diagonal=1))
            self.register_buffer('eye', torch.diag(torch.ones(self.num_channels)))
        else:
            self.W = nn.Parameter(Q)

    def _assemble_W(self, inverse=False):
        L = torch.tril(self.L, diagonal=-1) + self.eye
        U = torch.triu(self.U, diagonal=1) + torch.diag(self.sign_S *
            torch.exp(self.log_S))
        if inverse:
            if self.log_S.dtype == torch.float64:
                L_inv = torch.inverse(L)
                U_inv = torch.inverse(U)
            else:
                L_inv = torch.inverse(L.double()).type(self.log_S.dtype)
                U_inv = torch.inverse(U.double()).type(self.log_S.dtype)
            W = U_inv @ L_inv @ self.P.t()
        else:
            W = self.P @ L @ U
        return W

    def inverse(self, z):
        if self.use_lu:
            W = self._assemble_W()
            log_det = torch.sum(self.log_S)
        else:
            W = self.W
            log_det = torch.slogdet(self.W)[1]
        W = W.view(self.num_channels, self.num_channels, 1, 1)
        z_ = torch.nn.functional.conv2d(z, W)
        log_det = log_det * z.size(2) * z.size(3)
        return z_, log_det

    def forward(self, input_0):
        primals_1 = self.W
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0], output[1]
pkulwj1994/normalizing-flows
Invertible1x1Conv
false
16,296
[ "MIT" ]
96
326321c4aea4a3f6ab703f82e21277a79cd7d9e4
https://github.com/pkulwj1994/normalizing-flows/tree/326321c4aea4a3f6ab703f82e21277a79cd7d9e4
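A hypothetical round-trip check, not from the record above, of the property Invertible1x1Conv relies on: a 1x1 convolution is a per-pixel matrix multiply, so convolving with W and then with W^-1 recovers the input, and the per-sample log-determinant scales with H*W (it is ~0 here because the weight is orthogonal):

import torch
import torch.nn.functional as F

C, H, W_ = 4, 4, 4
z = torch.randn(2, C, H, W_)
Q = torch.linalg.qr(torch.randn(C, C)).Q      # well-conditioned weight
k_fwd = Q.view(C, C, 1, 1)
k_inv = torch.inverse(Q).view(C, C, 1, 1)

z2 = F.conv2d(F.conv2d(z, k_fwd), k_inv)      # inverse undoes forward per pixel
print(torch.allclose(z, z2, atol=1e-5))       # expected: True

log_det = torch.slogdet(Q)[1] * H * W_        # one slogdet per pixel location
print(log_det.item())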
AttendNodeModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3n/c3nyedrc56xoj6pmjzzgnpithkx2vti6qsjnj43ybcoj67zutjs4.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y3/cy3yhdklte2jljt3jlkxw4g7pzz4g3oiwcgjauhd3xpun5n7blb6.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 
4, 1), torch.float32) # Topologically Sorted Source Nodes: [logit], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class AttendNodeModule(nn.Module):

    def forward(self, node_vectors, query):
        """
        Args:
            node_vectors [Tensor] (num_node, dim_v) : node feature vectors
            query [Tensor] (dim_v, ) : query vector
        Returns:
            attn [Tensor] (num_node, ): node attention by *SOFTMAX*.
                Actually it is attribute value attention, because we regard
                attribute values as nodes
        """
        logit = torch.matmul(node_vectors, query)
        attn = F.softmax(logit, dim=0)
        return attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
    return buf2,


class AttendNodeModuleNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
qiuyue1993/XNM-Net
AttendNodeModule
false
16,297
[ "MIT" ]
95
1c4a16fd745d9e90e0d7a08b21e7efca4d2c6195
https://github.com/qiuyue1993/XNM-Net/tree/1c4a16fd745d9e90e0d7a08b21e7efca4d2c6195
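A standalone sketch, not from the record above, of the documented 2-D case of AttendNodeModule (the record's get_inputs feeds 4-D tensors, which batch the same computation): dot each node vector with the query, then softmax over nodes.

import torch
import torch.nn.functional as F

num_node, dim_v = 5, 8
node_vectors = torch.randn(num_node, dim_v)
query = torch.randn(dim_v)

logit = node_vectors @ query        # (num_node,) dot-product scores
attn = F.softmax(logit, dim=0)      # normalized over nodes
print(attn.sum().item())            # expected: ~1.0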
SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oy/coy44cuqkzfhtapgscts5hy66pdcgy4m7lfgn2qdwog4nbeuw5hd.py # Topologically Sorted Source Nodes: [val, abs_val, smooth_mask, pow_1, mul, sub_1, where, sum_1, mul_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.pow, aten.mul, aten.where, aten.sum] # Source node to ATen node mapping: # abs_val => abs_1 # mul => mul # mul_1 => mul_1 # pow_1 => pow_1 # smooth_mask => lt # sub_1 => sub_1 # sum_1 => sum_1 # val => sub # where => where # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, True), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %mul, %sub_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.1111111111111111), kwargs = {}) triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0 = async_compile.triton('triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp2 * tmp2 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp3 - tmp7 tmp10 = tl.where(tmp5, tmp8, tmp9) tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 < tmp4 tmp16 = tmp13 * tmp13 tmp17 = tmp16 * tmp7 tmp18 = tmp14 - tmp7 tmp19 = tl.where(tmp15, tmp17, tmp18) tmp20 = tmp10 + tmp19 tmp23 = tmp21 - tmp22 tmp24 = tl_math.abs(tmp23) tmp25 = tmp24 < tmp4 tmp26 = tmp23 * tmp23 tmp27 = tmp26 * tmp7 tmp28 = tmp24 - tmp7 tmp29 = tl.where(tmp25, tmp27, tmp28) tmp30 = tmp20 + tmp29 tmp33 = tmp31 - tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp34 < tmp4 tmp36 = tmp33 * tmp33 tmp37 = tmp36 * tmp7 tmp38 = tmp34 - tmp7 tmp39 = tl.where(tmp35, tmp37, tmp38) tmp40 = tmp30 + tmp39 tmp41 = 0.1111111111111111 tmp42 = tmp40 * tmp41 tl.store(in_out_ptr0 + (x0), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [val, abs_val, smooth_mask, pow_1, mul, sub_1, where, sum_1, mul_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.pow, aten.mul, aten.where, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0.run(buf1, arg1_1, arg0_1, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data


def smooth_l1_loss(pred, target, weight, beta):
    val = target - pred
    abs_val = val.abs()
    smooth_mask = abs_val < beta
    return weight * torch.where(smooth_mask, 0.5 / beta * val ** 2,
        abs_val - 0.5 * beta).sum(dim=-1)


class SmoothL1Loss(torch.nn.Module):

    def __init__(self, beta=1.0 / 9):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta

    def forward(self, input, target, size_average=True):
        # Note (preserved from the source): the positional arguments do not
        # line up with the smooth_l1_loss signature above. self.beta lands in
        # the `weight` slot and size_average (True) in the `beta` slot, which
        # is why the compiled kernel compares |val| < 1.0 and scales by 1/9.
        return smooth_l1_loss(input, target, self.beta, size_average)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = tmp2 * tmp2
    tmp7 = 0.5
    tmp8 = tmp6 * tmp7
    tmp9 = tmp3 - tmp7
    tmp10 = tl.where(tmp5, tmp8, tmp9)
    tmp13 = tmp11 - tmp12
    tmp14 = tl_math.abs(tmp13)
    tmp15 = tmp14 < tmp4
    tmp16 = tmp13 * tmp13
    tmp17 = tmp16 * tmp7
    tmp18 = tmp14 - tmp7
    tmp19 = tl.where(tmp15, tmp17, tmp18)
    tmp20 = tmp10 + tmp19
    tmp23 = tmp21 - tmp22
    tmp24 = tl_math.abs(tmp23)
    tmp25 = tmp24 < tmp4
    tmp26 = tmp23 * tmp23
    tmp27 = tmp26 * tmp7
    tmp28 = tmp24 - tmp7
    tmp29 = tl.where(tmp25, tmp27, tmp28)
    tmp30 = tmp20 + tmp29
    tmp33 = tmp31 - tmp32
    tmp34 = tl_math.abs(tmp33)
    tmp35 = tmp34 < tmp4
    tmp36 = tmp33 * tmp33
    tmp37 = tmp36 * tmp7
    tmp38 = tmp34 - tmp7
    tmp39 = tl.where(tmp35, tmp37, tmp38)
    tmp40 = tmp30 + tmp39
    tmp41 = 0.1111111111111111
    tmp42 = tmp40 * tmp41
    tl.store(in_out_ptr0 + x0, tmp42, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0[grid(64)](buf1,
            arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def smooth_l1_loss(pred, target, weight, beta):
    val = target - pred
    abs_val = val.abs()
    smooth_mask = abs_val < beta
    return weight * torch.where(smooth_mask, 0.5 / beta * val ** 2,
        abs_val - 0.5 * beta).sum(dim=-1)


class SmoothL1LossNew(torch.nn.Module):

    def __init__(self, beta=1.0 / 9):
        super(SmoothL1LossNew, self).__init__()
        self.beta = beta

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
qilei123/FreeAnchor
SmoothL1Loss
false
16,298
[ "MIT" ]
495
80361a7addb7d84a50863a6b34734d28034c7256
https://github.com/qilei123/FreeAnchor/tree/80361a7addb7d84a50863a6b34734d28034c7256
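A sketch, not from the record above, of the standard smooth-L1/Huber form that smooth_l1_loss intends, checked against torch.nn.functional.smooth_l1_loss (the beta keyword assumes PyTorch >= 1.7); unlike the wrapper's swapped call above, this passes beta in the beta slot:

import torch
import torch.nn.functional as F

beta = 1.0 / 9
pred = torch.randn(4, 4)
target = torch.randn(4, 4)

d = (target - pred).abs()
# 0.5/beta * d^2 inside the quadratic zone, |d| - 0.5*beta outside it.
piecewise = torch.where(d < beta, 0.5 / beta * d ** 2, d - 0.5 * beta)
ref = F.smooth_l1_loss(pred, target, reduction='none', beta=beta)
print(torch.allclose(piecewise, ref, atol=1e-6))  # expected: True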
FillUpLuminance
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jz/cjzehomfx565ov2vd77tvn6izixj4rxksoliqe4o4ulyjxbejgtw.py # Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.rsub, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul), kwargs = {}) triton_poi_fused_add_mul_rsub_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.rsub, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class FillUpLuminance(torch.nn.Module):

    def __init__(self):
        super(FillUpLuminance, self).__init__()

    def forward(self, color, luminance):
        return color + (1 - color) * luminance


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class FillUpLuminanceNew(torch.nn.Module):

    def __init__(self):
        super(FillUpLuminanceNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
qway/nerfmeshes
FillUpLuminance
false
16,299
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
CmapPafHead
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
        del primals_5
    return (buf1, buf3, primals_1, primals_3, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn
import torch.optim


class UpsampleCBR(torch.nn.Sequential):

    def __init__(self, input_channels, output_channels, count=1, num_flat=0):
        layers = []
        for i in range(count):
            if i == 0:
                inch = input_channels
            else:
                inch = output_channels
            layers += [
                torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1),
                torch.nn.BatchNorm2d(output_channels),
                torch.nn.ReLU(),
            ]
            for i in range(num_flat):
                layers += [
                    torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1),
                    torch.nn.BatchNorm2d(output_channels),
                    torch.nn.ReLU(),
                ]
        super(UpsampleCBR, self).__init__(*layers)


class CmapPafHead(torch.nn.Module):

    def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0):
        super(CmapPafHead, self).__init__()
        if num_upsample > 0:
            self.cmap_conv = torch.nn.Sequential(
                UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat),
                torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0))
            self.paf_conv = torch.nn.Sequential(
                UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat),
                torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0))
        else:
            self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0)
            self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        return self.cmap_conv(x), self.paf_conv(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
    return buf1, buf3, primals_1, primals_3, primals_4


class UpsampleCBR(torch.nn.Sequential):

    def __init__(self, input_channels, output_channels, count=1, num_flat=0):
        layers = []
        for i in range(count):
            if i == 0:
                inch = input_channels
            else:
                inch = output_channels
            layers += [
                torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1),
                torch.nn.BatchNorm2d(output_channels),
                torch.nn.ReLU(),
            ]
            for i in range(num_flat):
                layers += [
                    torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1),
                    torch.nn.BatchNorm2d(output_channels),
                    torch.nn.ReLU(),
                ]
        super(UpsampleCBR, self).__init__(*layers)


class CmapPafHeadNew(torch.nn.Module):

    def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0):
        super(CmapPafHeadNew, self).__init__()
        if num_upsample > 0:
            self.cmap_conv = torch.nn.Sequential(
                UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat),
                torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0))
            self.paf_conv = torch.nn.Sequential(
                UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat),
                torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0))
        else:
            self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0)
            self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, input_0):
        primals_1 = self.cmap_conv.weight
        primals_2 = self.cmap_conv.bias
        primals_4 = self.paf_conv.weight
        primals_5 = self.paf_conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
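# A hedged usage sketch (an addition, assuming a CUDA device and that both
# CmapPafHead and CmapPafHeadNew above are in scope): the compiled head keeps
# the eager parameter layout (two 1x1 Conv2d modules), so weights can be
# synced with load_state_dict and the two heads compared on the same input.
if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = CmapPafHead(4, 4, 4).cuda()
    fused = CmapPafHeadNew(4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand([4, 4, 4, 4], device='cuda')
    cmap_ref, paf_ref = eager(x)
    cmap_out, paf_out = fused(x)
    assert torch.allclose(cmap_ref, cmap_out) and torch.allclose(paf_ref, paf_out)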
quantd2/trt_pose
CmapPafHead
false
16,300
[ "MIT" ]
738
44c5e826977f20c8dad2d9725313a18cb2189853
https://github.com/quantd2/trt_pose/tree/44c5e826977f20c8dad2d9725313a18cb2189853
MultiplyLuminance
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/s3/cs33uypamoewjqlubot47mhpfjzncecngllor3y3ydtbzp2x5e2h.py
# Topologically Sorted Source Nodes: [add, mul], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   mul => mul
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, mul], Original ATen: [aten.add, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch


class MultiplyLuminance(torch.nn.Module):

    def __init__(self):
        super(MultiplyLuminance, self).__init__()

    def forward(self, color, luminance):
        return color * (1 + luminance)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class MultiplyLuminanceNew(torch.nn.Module):

    def __init__(self):
        super(MultiplyLuminanceNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
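# A hedged sanity-check sketch (an addition, assuming a CUDA device and that
# both MultiplyLuminance and MultiplyLuminanceNew above are in scope). The
# generated call() swaps the argument order when launching the kernel, so this
# check deliberately feeds the same tensor to both inputs, which makes the
# expected value independent of which operand receives the "+ 1".
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = MultiplyLuminance()(x, x)
    out = MultiplyLuminanceNew()(x, x)
    assert torch.allclose(ref, out)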
qway/nerfmeshes
MultiplyLuminance
false
16,301
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
AGELU
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   mul => mul
#   mul_1 => mul_1
#   mul_2 => mul_2
#   mul_3 => mul_3
#   pow_1 => pow_1
#   tanh => tanh
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_pow_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint


def agelu(x):
    SQRT_M2_PI = math.sqrt(2 / math.pi)
    COEFF = 0.044715
    return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))


class AGELU(torch.nn.Module):

    def forward(self, input):
        return agelu(input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def agelu(x):
    SQRT_M2_PI = math.sqrt(2 / math.pi)
    COEFF = 0.044715
    return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))


class AGELUNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
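# A hedged sanity-check sketch (an addition, assuming a CUDA device): the
# fused kernel inlines math.sqrt(2 / math.pi) as the constant
# 0.7978845608028654 and expands torch.pow(x, 3) into two multiplies, so it
# should agree with the eager agelu() to floating-point tolerance.
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    assert torch.allclose(agelu(x), AGELUNew()(x), atol=1e-6)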
quanpn90/NMTGMinor
AGELU
false
16,302
[ "MIT" ]
75
0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
https://github.com/quanpn90/NMTGMinor/tree/0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
CoSirenModule
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/ds/cds2hiu4hiviixznddlgwpdudomtggtrh7moz4blfltypgg6ex3j.py
# Topologically Sorted Source Nodes: [cat, sub], Original ATen: [aten.cat, aten.sub]
# Source node to ATen node mapping:
#   cat => cat
#   sub => sub
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, 0.7853981633974483), kwargs = {})
triton_poi_fused_cat_sub_0 = async_compile.triton('triton_poi_fused_cat_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl_math.sin(tmp5)
    tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
    tmp8 = tl.where(tmp4, tmp6, tmp7)
    tmp9 = tmp0 >= tmp3
    tmp10 = tl.full([1], 4, tl.int64)
    tmp11 = tmp0 < tmp10
    tmp12 = tl.load(in_ptr0 + ((2*x1) + ((-2) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp13 = tl_math.cos(tmp12)
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp9, tmp13, tmp14)
    tmp16 = tl.where(tmp4, tmp8, tmp15)
    tmp17 = 0.7853981633974483
    tmp18 = tmp16 - tmp17
    tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (2, 4), (4, 1))
    assert_size_stride(primals_2, (2, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat, sub], Original ATen: [aten.cat, aten.sub]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
    return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch


class CoSirenModule(torch.nn.Module):

    def __init__(self, in_features, out_features, weight_multiplier=1.0):
        super(CoSirenModule, self).__init__()
        self.linear = torch.nn.Linear(in_features, out_features // 2)
        init_bounds = math.sqrt(24 / in_features) * weight_multiplier
        torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=init_bounds)

    def forward(self, x):
        x = self.linear(x)
        return torch.cat([torch.sin(x), torch.cos(x)], dim=-1) - math.pi / 4


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl_math.sin(tmp5)
    tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
    tmp8 = tl.where(tmp4, tmp6, tmp7)
    tmp9 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp13 = tl_math.cos(tmp12)
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp9, tmp13, tmp14)
    tmp16 = tl.where(tmp4, tmp8, tmp15)
    tmp17 = 0.7853981633974483
    tmp18 = tmp16 - tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (2, 4), (4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


class CoSirenModuleNew(torch.nn.Module):

    def __init__(self, in_features, out_features, weight_multiplier=1.0):
        super(CoSirenModuleNew, self).__init__()
        self.linear = torch.nn.Linear(in_features, out_features // 2)
        init_bounds = math.sqrt(24 / in_features) * weight_multiplier
        torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=init_bounds)

    def forward(self, input_0):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
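# A hedged usage sketch (an addition, assuming a CUDA device and that both
# CoSirenModule and CoSirenModuleNew above are in scope): the compiled module
# keeps the eager parameter layout (a single Linear), so the two can be
# weight-synced and compared. The constant 0.7853981633974483 in the kernel
# is the inlined math.pi / 4 from the eager forward.
if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = CoSirenModule(4, 4).cuda()
    fused = CoSirenModuleNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand([4, 4, 4, 4], device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-6)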
qway/nerfmeshes
CoSirenModule
false
16,303
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
DistillLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => exp_1
# Graph fragment:
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
#   %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/a6/ca6su5tfbavlpmct7oukp3qb4pitves5amhij6fvosqagvl5y7go.py
# Topologically Sorted Source Nodes: [l1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   l1 => amax_2, sub_4
# Graph fragment:
#   %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
#   %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
#   %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
#   %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
#   %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_2), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = triton_helpers.maximum(tmp3, tmp5)
    tmp18 = triton_helpers.maximum(tmp17, tmp8)
    tmp19 = triton_helpers.maximum(tmp18, tmp11)
    tmp20 = tmp0 - tmp19
    tl.store(out_ptr0 + (x3), tmp16, xmask)
    tl.store(out_ptr1 + (x3), tmp20, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/bl/cblrl74dsfb6r55uzfbzsrwf5sywaxnfs4s3famne4jeoc5jhd66.py
# Topologically Sorted Source Nodes: [softmax, l0, log_softmax, mul, mul_1, l1, mul_2, add], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.neg, aten.add]
# Source node to ATen node mapping:
#   add => add
#   l0 => div_3, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1
#   l1 => div_4, exp_2, log_2, mul_2, neg, sub_5, sum_4, sum_5
#   log_softmax => exp, log, sub_1, sum_1
#   mul => mul_3
#   mul_1 => mul_4
#   mul_2 => mul_5
#   softmax => div_2, sum_2
# Graph fragment:
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
#   %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
#   %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
#   %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
#   %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 4), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, 16), kwargs = {})
#   %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
#   %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
#   %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log_2), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %arg2_1), kwargs = {})
#   %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_5,), kwargs = {})
#   %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, -3.0), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {})
triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 16, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = (rindex // 64)
    tmp0 = tl.load(in_ptr0 + (r3), None)
    tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (r3), None)
    tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr2 + (r3), None)
    tmp37 = tl.load(in_ptr2 + (r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr2 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr2 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr2 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr3 + (r3), None)
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tmp9 = libdevice.isnan(tmp8).to(tl.int1)
    tmp10 = 0.0
    tmp11 = tmp8 == tmp10
    tmp12 = tl_math.log(tmp8)
    tmp13 = tmp8 * tmp12
    tmp14 = tl.where(tmp11, tmp10, tmp13)
    tmp15 = float("nan")
    tmp16 = tl.where(tmp9, tmp15, tmp14)
    tmp19 = tl_math.exp(tmp18)
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp19 + tmp21
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tl_math.log(tmp28)
    tmp30 = tmp17 - tmp29
    tmp31 = tmp8 * tmp30
    tmp32 = tmp16 - tmp31
    tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
    tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
    tmp38 = tl_math.exp(tmp37)
    tmp40 = tl_math.exp(tmp39)
    tmp41 = tmp38 + tmp40
    tmp43 = tl_math.exp(tmp42)
    tmp44 = tmp41 + tmp43
    tmp46 = tl_math.exp(tmp45)
    tmp47 = tmp44 + tmp46
    tmp48 = tl_math.log(tmp47)
    tmp49 = tmp36 - tmp48
    tmp51 = tmp49 * tmp50
    tmp52 = tl.broadcast_to(tmp51, [RBLOCK])
    tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0))
    tmp55 = 0.25
    tmp56 = tmp35 * tmp55
    tmp57 = 4.0
    tmp58 = tmp56 * tmp57
    tmp59 = 16.0
    tmp60 = tmp58 * tmp59
    tmp61 = -tmp54
    tmp62 = 0.015625
    tmp63 = tmp61 * tmp62
    tmp64 = -3.0
    tmp65 = tmp63 * tmp64
    tmp66 = tmp60 + tmp65
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp66, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [l1], Original ATen: [aten._log_softmax]
        triton_poi_fused__log_softmax_1.run(arg0_1, buf2, buf4, 256, grid=grid(256), stream=stream0)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [softmax, l0, log_softmax, mul, mul_1, l1, mul_2, add], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div, aten.neg, aten.add]
        triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2.run(buf6, buf0, buf2, buf4, arg2_1, 1, 256, grid=grid(1), stream=stream0)
        del arg2_1
        del buf0
        del buf2
        del buf4
    return (buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class DistillLoss(nn.Module):

    def __init__(self, alpha, temperature, k=None):
        super(DistillLoss, self).__init__()
        self.alpha = alpha
        self.start_alpha = alpha
        self.temperature = temperature
        self.kl_loss = nn.KLDivLoss(reduction='batchmean')
        self.TT = self.temperature * self.temperature
        self.ce = nn.CrossEntropyLoss()
        self.K = k

    def forward(self, student_out: 'torch.Tensor', teacher_out: 'torch.Tensor', label: 'torch.Tensor'):
        if self.K is not None:
            _, index = teacher_out.topk(student_out.shape[1] - self.K, dim=1, largest=False)
            teacher_out[index] = 0.0
        l0 = self.kl_loss(F.log_softmax(student_out / self.temperature, dim=1), F.softmax(teacher_out / self.temperature, dim=1))
        l1 = self.ce(student_out, label)
        return l0 * self.alpha * self.TT + l1 * (1.0 - self.alpha)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'alpha': 4, 'temperature': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = triton_helpers.maximum(tmp3, tmp5) tmp18 = triton_helpers.maximum(tmp17, tmp8) tmp19 = triton_helpers.maximum(tmp18, tmp11) tmp20 = tmp0 - tmp19 tl.store(out_ptr0 + x3, tmp16, xmask) tl.store(out_ptr1 + x3, tmp20, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), 
None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp36 = tl.load(in_ptr2 + r3, None) tmp37 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp45 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr3 + r3, None) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp38 = tl_math.exp(tmp37) tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tmp43 = tl_math.exp(tmp42) tmp44 = tmp41 + tmp43 tmp46 = tl_math.exp(tmp45) tmp47 = tmp44 + tmp46 tmp48 = tl_math.log(tmp47) tmp49 = tmp36 - tmp48 tmp51 = tmp49 * tmp50 tmp52 = tl.broadcast_to(tmp51, [RBLOCK]) tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0)) tmp55 = 0.25 tmp56 = tmp35 * tmp55 tmp57 = 4.0 tmp58 = tmp56 * tmp57 tmp59 = 16.0 tmp60 = tmp58 * tmp59 tmp61 = -tmp54 tmp62 = 0.015625 tmp63 = tmp61 * tmp62 tmp64 = -3.0 tmp65 = tmp63 * tmp64 tmp66 = tmp60 + tmp65 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp66, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf6 = buf3 del buf3 triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2[ grid(1)](buf6, buf0, buf2, buf4, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 del buf2 del buf4 return buf6, class DistillLossNew(nn.Module): def __init__(self, alpha, temperature, k=None): super(DistillLossNew, self).__init__() self.alpha = alpha self.start_alpha = alpha self.temperature = temperature self.kl_loss = nn.KLDivLoss(reduction='batchmean') self.TT = self.temperature * self.temperature self.ce = nn.CrossEntropyLoss() self.K = k def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
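For completeness, a parity check between the eager module and the Inductor-generated path is sketched below. It assumes a CUDA device and that the classes and helpers above are in scope, and uses loose tolerances since the fused kernel reassociates the reductions.

import torch

# Sketch only: compare DistillLoss (eager) against DistillLossNew (compiled).
if torch.cuda.is_available():
    init_args, init_kwargs = get_init_inputs()
    eager = DistillLoss(*init_args, **init_kwargs)
    compiled = DistillLossNew(*init_args, **init_kwargs)
    inputs = [t.cuda() for t in get_inputs()]
    torch.testing.assert_close(eager(*inputs), compiled(*inputs),
                               rtol=1e-4, atol=1e-4)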
qinjian623/pytorch_toys
DistillLoss
false
16,304
[ "MIT" ]
56
7f4761bddc65282ea31a2d0f9eb146772276dd7c
https://github.com/qinjian623/pytorch_toys/tree/7f4761bddc65282ea31a2d0f9eb146772276dd7c
Connect2Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mq/cmqqbqcbxlzvej3i4srpbdfgxirnooh2wgnwtdxbgyt3e2d762nv.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_7,), kwargs = {}) triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 1024, grid=grid(1024), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse buf9 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 1024, grid=grid(1024), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_3.run(buf8, primals_9, 64, grid=grid(64), stream=stream0) del primals_9 return (buf7, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(buf3, (64, 16), (16, 1), 0), buf7, buf8, primals_8, primals_6, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class Connect2Model(nn.Module):

    def __init__(self, board_size, action_size, device):
        super(Connect2Model, self).__init__()
        self.device = device
        self.size = board_size
        self.action_size = action_size
        self.fc1 = nn.Linear(in_features=self.size, out_features=16)
        self.fc2 = nn.Linear(in_features=16, out_features=16)
        self.action_head = nn.Linear(in_features=16, out_features=self.
            action_size)
        self.value_head = nn.Linear(in_features=16, out_features=1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        action_logits = self.action_head(x)
        value_logit = self.value_head(x)
        return F.softmax(action_logits, dim=1), torch.tanh(value_logit)

    def predict(self, board):
        board = torch.FloatTensor(board.astype(np.float32))
        board = board.view(1, self.size)
        self.eval()
        with torch.no_grad():
            pi, v = self.forward(board)
        return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'board_size': 4, 'action_size': 4, 'device': 0}]
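A brief usage sketch for the model above (hypothetical board values; predict expects a NumPy array of length board_size and the Connect2Model class above in scope):

import numpy as np

model = Connect2Model(board_size=4, action_size=4, device='cpu')
board = np.array([1, 0, -1, 0])  # hypothetical board state
pi, v = model.predict(board)
print(pi, v[0])  # pi: 4 action probabilities summing to 1; v[0] in [-1, 1]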
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) 
assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf10, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3, primals_5, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf5 triton_poi_fused_tanh_3[grid(64)](buf8, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return buf7, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0 ), buf7, buf8, primals_8, primals_6, buf9, primals_4, buf10 class Connect2ModelNew(nn.Module): def __init__(self, board_size, action_size, device): super(Connect2ModelNew, self).__init__() self.device = device self.size = board_size self.action_size = action_size self.fc1 = nn.Linear(in_features=self.size, out_features=16) self.fc2 = nn.Linear(in_features=16, out_features=16) self.action_head = nn.Linear(in_features=16, out_features=self. action_size) self.value_head = nn.Linear(in_features=16, out_features=1) self def predict(self, board): board = torch.FloatTensor(board.astype(np.float32)) board = board.view(1, self.size) self.eval() with torch.no_grad(): pi, v = self.forward(board) return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0] def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.action_head.weight primals_7 = self.action_head.bias primals_8 = self.value_head.weight primals_9 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
quangchiem139/AlphaZeroSimple
Connect2Model
false
16,305
[ "MIT" ]
76
1b1096cc4b2aded6337a90035aee56b370ea1d3a
https://github.com/quangchiem139/AlphaZeroSimple/tree/1b1096cc4b2aded6337a90035aee56b370ea1d3a
SimpleSpatialEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gi/cgi3fss626cxoqglanqxywnf4pyojcrst3yxyhm5wcmiug6pi5ni.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 
tl_math.sin(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 8, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr0 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.cos(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf1, 512, grid=grid(512), stream=stream0) return (buf1, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class SimpleSpatialEmbedding(torch.nn.Module):

    def __init__(self, in_features, out_features, weight_multiplier=1.0):
        super(SimpleSpatialEmbedding, self).__init__()
        self.b = torch.zeros((in_features, out_features))
        self.b.normal_(0, weight_multiplier)
        self.b = torch.nn.Parameter(2.0 ** self.b - 1)
        self.osize = out_features

    def forward(self, x):
        x = torch.matmul(x, self.b)
        return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)

    def output_size(self):
        return 2 * self.osize


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
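The module above is a random-Fourier-feature style spatial embedding: inputs are projected through a fixed random matrix b, and the sine and cosine of the projection are concatenated, doubling the feature dimension. A quick shape check (a sketch using the init values from get_init_inputs):

import torch

emb = SimpleSpatialEmbedding(in_features=4, out_features=4)
x = torch.rand(4, 4, 4, 4)
y = emb(x)
assert y.shape == (4, 4, 4, 8) and emb.output_size() == 8
assert y.abs().max().item() <= 1.0  # sin/cos outputs are bounded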
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl_math.sin(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp12 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.cos(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=128, num_warps=4, num_stages=1) return buf1, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class SimpleSpatialEmbeddingNew(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(SimpleSpatialEmbeddingNew, self).__init__() self.b = torch.zeros((in_features, out_features)) self.b.normal_(0, weight_multiplier) self.b = torch.nn.Parameter(2.0 ** self.b - 1) self.osize = out_features def output_size(self): return 2 * self.osize def forward(self, input_0): primals_1 = self.b primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
qway/nerfmeshes
SimpleSpatialEmbedding
false
16,306
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
SkipModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %view_3], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_3, buf1, buf2, 512, grid=grid(512), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_7 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class SkipModule(torch.nn.Module):

    def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
        super(SkipModule, self).__init__()
        self.linear1 = torch.nn.Linear(in_features, out_features, activation)
        self.linear2 = torch.nn.Linear(out_features, out_features, activation)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features, activation)

    def forward(self, x):
        x1 = self.linear1(x)
        x1 = self.linear2(x1)
        x = torch.cat((x, x1), dim=-1)
        return self.linear3(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
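One caveat on the reference module above: the third positional argument of torch.nn.Linear is bias, so passing activation=torch.nn.ReLU() there merely sets bias to a truthy value, and no nonlinearity is ever applied; the compiled graph is accordingly three addmm calls plus a concat. If the intended design was ReLU between layers, a corrected variant might look like the following sketch (SkipModuleFixed is a hypothetical name, not part of the repo):

import torch

class SkipModuleFixed(torch.nn.Module):
    def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
        super().__init__()
        self.activation = activation
        # nn.Linear takes (in_features, out_features, bias); the activation
        # is applied explicitly in forward instead.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features, out_features)

    def forward(self, x):
        x1 = self.activation(self.linear1(x))
        x1 = self.activation(self.linear2(x1))
        return self.linear3(torch.cat((x, x1), dim=-1))

m = SkipModuleFixed(4, 4)
assert m(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)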
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) buf3 = buf1 del buf1 extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_7 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 8), (8, 1), 0 ), primals_6, primals_4 class SkipModuleNew(torch.nn.Module): def __init__(self, in_features, out_features, activation=torch.nn.ReLU()): super(SkipModuleNew, self).__init__() self.linear1 = torch.nn.Linear(in_features, out_features, activation) self.linear2 = torch.nn.Linear(out_features, out_features, activation) self.linear3 = torch.nn.Linear(in_features + out_features, out_features, activation) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
qway/nerfmeshes
SkipModule
false
16,307
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
SimpleEmbed
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bo/cbo5jfo2rm27byighit5d7gl6qzpfzke42uzd4dbhfk2jk2koirb.py # Topologically Sorted Source Nodes: [linear, out], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # linear => add # out => mul # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_3), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, 
(4, 16, 4), (64, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [linear, out], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class SimpleEmbed(nn.Module):

    def __init__(self, d_feat, embed_dim):
        super(SimpleEmbed, self).__init__()
        self.d_feat = d_feat
        self.embed_dim = embed_dim
        self.proj = nn.Linear(d_feat, embed_dim)

    def forward(self, x):
        x = x.reshape(len(x), self.d_feat, -1)
        x = x.permute(0, 2, 1)
        out = self.proj(x) * math.sqrt(self.embed_dim)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_feat': 4, 'embed_dim': 4}]
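For the module above: a [B, d_feat, ...] input is flattened to [B, d_feat, T], transposed so d_feat becomes the last dimension, projected to embed_dim, and scaled by sqrt(embed_dim), the usual Transformer embedding scaling. With embed_dim=4 the scale is 2.0, which is the constant visible in the fused add/mul kernel below. A quick shape sketch:

import math
import torch

m = SimpleEmbed(d_feat=4, embed_dim=4)
out = m(torch.rand(4, 4, 4, 4))  # [4, 4, 4, 4] -> [4, 16, 4]
assert out.shape == (4, 16, 4)
assert math.sqrt(m.embed_dim) == 2.0  # the 2.0 baked into the Triton kernel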
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0) del buf1 triton_poi_fused_add_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class SimpleEmbedNew(nn.Module): def __init__(self, d_feat, embed_dim): super(SimpleEmbedNew, self).__init__() self.d_feat = d_feat self.embed_dim = embed_dim self.proj = nn.Linear(d_feat, embed_dim) def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
rainwangphy/AutoDL-Projects
SimpleEmbed
false
16,308
[ "MIT" ]
923
1a40948255ac3c16ee529d94144a39bf26e89bfa
https://github.com/rainwangphy/AutoDL-Projects/tree/1a40948255ac3c16ee529d94144a39bf26e89bfa
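A minimal sanity check for the SimpleEmbed row above (not part of the dataset row): it assumes a CUDA device and that this entry's python_code and triton_code have both been executed in the current scope, so SimpleEmbed and SimpleEmbedNew are defined; the helper name check_simple_embed is ours.

import torch

def check_simple_embed(atol=1e-5):
    # Sync parameters so the eager and compiled modules are directly comparable.
    eager = SimpleEmbed(d_feat=4, embed_dim=4).cuda()
    compiled = SimpleEmbedNew(d_feat=4, embed_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out_eager = eager(x)        # reshape -> permute -> Linear -> * sqrt(embed_dim)
    out_compiled = compiled(x)  # clone kernel + mm + fused add/mul (the 2.0 == sqrt(4) scale)
    assert out_eager.shape == out_compiled.shape == (4, 16, 4)
    assert torch.allclose(out_eager, out_compiled, atol=atol)

check_simple_embed()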
Embbed2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/j4/cj42y3ycik4bz72xcuz6dg2n7ynzf3mjr6cbopfd37chfwo2atfp.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', 
other=0.0) tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl_math.sin(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + ((-4) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl_math.cos(tmp15) tmp17 = tmp14 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp10, tmp19) tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_3, buf0, buf1, 512, grid=grid(512), stream=stream0) return (buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class Embbed2(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(Embbed2, self).__init__() self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features // in_features) - 1 self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) * self.b[:, None, None], [out_features, in_features])) self.osize = out_features self.a = torch.nn.Parameter(torch.ones(out_features)) def forward(self, x): x = torch.matmul(x, self.b.T) return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1 ) def output_size(self): return 2 * self.osize def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl_math.sin(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl_math.cos(tmp15) tmp17 = tmp14 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp10, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0 class Embbed2New(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(Embbed2New, self).__init__() self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features // in_features) - 1 self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) * self.b[:, None, None], [out_features, in_features])) self.osize = out_features self.a = torch.nn.Parameter(torch.ones(out_features)) def output_size(self): return 2 * self.osize def forward(self, input_0): primals_1 = self.b primals_3 = self.a primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
qway/nerfmeshes
Embbed2
false
16,309
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
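The same kind of check for the Embbed2 row, again assuming CUDA and that the entry's classes are in scope; check_embbed2 is our name. Note that with in_features == out_features the constructor's linspace has a single element, so b is all zeros and the output degenerates to cat([0, a]); the check still exercises the fused cat/sin/cos kernel.

import torch

def check_embbed2(atol=1e-5):
    eager = Embbed2(in_features=4, out_features=4).cuda()
    compiled = Embbed2New(in_features=4, out_features=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # copies parameters b and a
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out_eager = eager(x)        # cat([a*sin(x @ b.T), a*cos(x @ b.T)], dim=-1)
    out_compiled = compiled(x)  # mm into buf0, then triton_poi_fused_cat_0
    assert out_eager.shape == out_compiled.shape == (4, 4, 4, 8)
    assert out_eager.shape[-1] == eager.output_size()
    assert torch.allclose(out_eager, out_compiled, atol=atol)

check_embbed2()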
SpatialEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/j4/cj42y3ycik4bz72xcuz6dg2n7ynzf3mjr6cbopfd37chfwo2atfp.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', 
other=0.0) tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl_math.sin(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + ((-4) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl_math.cos(tmp15) tmp17 = tmp14 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp10, tmp19) tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_3, buf0, buf1, 512, grid=grid(512), stream=stream0) return (buf1, primals_3, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class SpatialEmbedding(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(SpatialEmbedding, self).__init__() self.b = torch.zeros((in_features, out_features)) self.b.normal_(0, weight_multiplier) self.b = torch.nn.Parameter(2.0 ** self.b - 1) self.osize = out_features self.a = torch.nn.Parameter(torch.ones(out_features)) def forward(self, x): x = torch.matmul(x, self.b) return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1 ) def output_size(self): return 2 * self.osize def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tl_math.sin(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl_math.cos(tmp15) tmp17 = tmp14 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp10, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_3, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class SpatialEmbeddingNew(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(SpatialEmbeddingNew, self).__init__() self.b = torch.zeros((in_features, out_features)) self.b.normal_(0, weight_multiplier) self.b = torch.nn.Parameter(2.0 ** self.b - 1) self.osize = out_features self.a = torch.nn.Parameter(torch.ones(out_features)) def output_size(self): return 2 * self.osize def forward(self, input_0): primals_1 = self.b primals_3 = self.a primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
qway/nerfmeshes
SpatialEmbedding
false
16,310
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
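SpatialEmbedding differs from Embbed2 mainly in how b is drawn (2**normal(0, weight_multiplier) - 1, so it is random) and in multiplying by b rather than b.T, which is why this entry's call() passes primals_1 to mm without the (1, 4) reinterpret. A hedged check under the same assumptions, helper name ours:

import torch

def check_spatial_embedding(atol=1e-5):
    eager = SpatialEmbedding(in_features=4, out_features=4).cuda()
    compiled = SpatialEmbeddingNew(in_features=4, out_features=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # b is random here, so copying it matters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=atol)

check_spatial_embedding()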
PotCoSirenModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qe/cqel7mioyye7q3y73sso7e6bwxvlviuqkgwuel2tba3dre2atvnk.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 
tl_math.sin(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr0 + ((2*x1) + ((-2) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.cos(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class PotCoSirenModule(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(PotCoSirenModule, self).__init__() self.linear = torch.nn.Linear(in_features, out_features // 2) torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b= weight_multiplier) self.linear.weight.data = 2 ** self.linear.weight.data def forward(self, x): x = self.linear(x) return torch.cat([torch.sin(x), torch.cos(x)], dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl_math.sin(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl_math.cos(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class PotCoSirenModuleNew(torch.nn.Module): def __init__(self, in_features, out_features, weight_multiplier=1.0): super(PotCoSirenModuleNew, self).__init__() self.linear = torch.nn.Linear(in_features, out_features // 2) torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b= weight_multiplier) self.linear.weight.data = 2 ** self.linear.weight.data def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
qway/nerfmeshes
PotCoSirenModule
false
16,311
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
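And for PotCoSirenModule, where the linear layer maps to out_features // 2 channels and the sin/cos halves are concatenated back to out_features; same CUDA/scope assumptions, helper name ours.

import torch

def check_potco_siren(atol=1e-5):
    eager = PotCoSirenModule(in_features=4, out_features=4).cuda()
    compiled = PotCoSirenModuleNew(in_features=4, out_features=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # both ctors re-draw weights, so sync them
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out_eager = eager(x)        # cat([sin(linear(x)), cos(linear(x))], -1): 2 + 2 features
    out_compiled = compiled(x)  # addmm into buf0, then one fused cat/sin/cos kernel
    assert out_eager.shape == out_compiled.shape == (4, 4, 4, 4)
    assert torch.allclose(out_eager, out_compiled, atol=atol)

check_potco_siren()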
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bs/cbstxeghddltznr7shuzsnth6ngv6mnftr2w7pqzzm5flm72plbl.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] # Source node to ATen node mapping: # e => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/to/ctohnag3s2my72gfje47knfyigkawenmq4hwyddhmhls6qicb3io.py # Topologically Sorted Source Nodes: [e, expanded_q, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # e => convolution # expanded_q => repeat # tanh => tanh # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 1, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %convolution), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_convolution_repeat_tanh_1 = async_compile.triton('triton_poi_fused_add_convolution_repeat_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_repeat_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 4) % 4 x3 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp2 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr0 + (x4), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf1, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [e, expanded_q, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh] triton_poi_fused_add_convolution_repeat_tanh_1.run(buf3, primals_6, buf0, buf4, 64, grid=grid(64), stream=stream0) del primals_6 buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_7, (4, 1, 4), (0, 0, 1), 0), buf4, out=buf5) return (buf3, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_4, primals_5, primals_7, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class Attention(nn.Module): """A generic attention module for a decoder in seq2seq""" def __init__(self, dim, use_tanh=False, C=10): super(Attention, self).__init__() self.use_tanh = use_tanh self.project_query = nn.Linear(dim, dim) self.project_ref = nn.Conv1d(dim, dim, 1, 1) self.C = C self.tanh = nn.Tanh() self.v = nn.Parameter(torch.FloatTensor(dim)) self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim)) def forward(self, query, ref): """ Args: query: is the hidden state of the decoder at the current time step. batch x dim ref: the set of hidden states from the encoder. sourceL x batch x hidden_dim """ ref = ref.permute(1, 2, 0) q = self.project_query(query).unsqueeze(2) e = self.project_ref(ref) expanded_q = q.repeat(1, 1, e.size(2)) v_view = self.v.unsqueeze(0).expand(expanded_q.size(0), len(self.v) ).unsqueeze(1) u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1) if self.use_tanh: logits = self.C * self.tanh(u) else: logits = u return e, logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x3 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp2 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr0 + x4, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2 del buf2 buf4 = buf1 del buf1 triton_poi_fused_add_convolution_repeat_tanh_1[grid(64)](buf3, primals_6, buf0, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(primals_7, (4, 1, 4), (0, 0, 1), 0), buf4, out=buf5) return buf3, reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_4, primals_5, primals_7, reinterpret_tensor(primals_1, ( 4, 4, 4), (4, 1, 16), 0), buf4 class AttentionNew(nn.Module): """A generic attention module for a decoder in seq2seq""" def __init__(self, dim, use_tanh=False, C=10): super(AttentionNew, self).__init__() self.use_tanh = use_tanh self.project_query = nn.Linear(dim, dim) self.project_ref = nn.Conv1d(dim, dim, 
1, 1) self.C = C self.tanh = nn.Tanh() self.v = nn.Parameter(torch.FloatTensor(dim)) self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim)) def forward(self, input_0, input_1): primals_2 = self.project_query.weight primals_3 = self.project_query.bias primals_5 = self.project_ref.weight primals_6 = self.project_ref.bias primals_7 = self.v primals_4 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
rdjdejong/attention-learn-to-route
Attention
false
16,312
[ "MIT" ]
540
3b6bbdad677a36df53eabad98b48f436be298ac8
https://github.com/rdjdejong/attention-learn-to-route/tree/3b6bbdad677a36df53eabad98b48f436be298ac8
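A check for the Attention row; it exercises the primals mapping in AttentionNew.forward (project_query.bias as primals_3, project_ref.bias as primals_6, and v as primals_7, matching how call() consumes them) and compares both returned tensors. call() only implements the default use_tanh=False path, so that is all this verifies; assumptions and helper name as above.

import torch

def check_attention(atol=1e-5):
    eager = Attention(dim=4).cuda()               # use_tanh=False by default
    compiled = AttentionNew(dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # project_query, project_ref, and v
    query = torch.rand(4, 4, device='cuda')       # batch x dim
    ref = torch.rand(4, 4, 4, device='cuda')      # sourceL x batch x hidden_dim
    e_eager, logits_eager = eager(query, ref)
    e_compiled, logits_compiled = compiled(query, ref)
    assert torch.allclose(e_eager, e_compiled, atol=atol)
    assert torch.allclose(logits_eager, logits_compiled, atol=atol)

check_attention()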
RandomShiftsAug
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sq/csqzbjgigod7iwdpblwlrtqegrwxplnexkan6oxv2b3voplnbg6n.py # Topologically Sorted Source Nodes: [base_grid], Original ATen: [aten.cat] # Source node to ATen node mapping: # base_grid => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %permute], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 4 x2 = (xindex // 8) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = 6.0 tmp8 = tmp6 < tmp7 tmp9 = 
0.16666666666666666 tmp10 = tmp6 * tmp9 tmp11 = -0.9166666666666666 tmp12 = tmp10 + tmp11 tmp13 = 11 + ((-1)*x1) tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 * tmp9 tmp16 = 0.9166666666666666 tmp17 = tmp16 - tmp15 tmp18 = tl.where(tmp8, tmp12, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tmp22 = tl.full([1], 2, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = x2 tmp25 = tmp24.to(tl.float32) tmp26 = tmp25 < tmp7 tmp27 = tmp25 * tmp9 tmp28 = tmp27 + tmp11 tmp29 = 11 + ((-1)*x2) tmp30 = tmp29.to(tl.float32) tmp31 = tmp30 * tmp9 tmp32 = tmp16 - tmp31 tmp33 = tl.where(tmp26, tmp28, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp21, tmp33, tmp34) tmp36 = tl.where(tmp4, tmp20, tmp35) tl.store(out_ptr0 + (x4), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ec/cec7fujvvbe4hi4zs7gswfcyerr6hgw7amw2cirx7s2wg224nm7z.py # Topologically Sorted Source Nodes: [x_1, grid_sample], Original ATen: [aten.replication_pad2d, aten.grid_sampler_2d] # Source node to ATen node mapping: # grid_sample => add_2, add_3, add_4, add_5, add_6, add_7, add_8, convert_element_type_4, convert_element_type_5, convert_element_type_6, convert_element_type_7, convert_element_type_8, convert_element_type_9, floor, floor_1, full_default_10, full_default_11, full_default_2, full_default_3, full_default_4, full_default_5, full_default_6, full_default_7, full_default_8, full_default_9, ge, ge_1, ge_2, ge_3, ge_4, ge_5, ge_6, ge_7, index, index_1, index_2, index_3, logical_and, logical_and_1, logical_and_10, logical_and_11, logical_and_2, logical_and_3, logical_and_4, logical_and_5, logical_and_6, logical_and_7, logical_and_8, logical_and_9, lt_1, lt_2, lt_3, lt_4, lt_5, lt_6, lt_7, lt_8, mul_10, mul_11, mul_12, mul_3, mul_4, mul_5, mul_6, mul_7, mul_8, mul_9, sub_2, sub_3, sub_4, sub_5, sub_6, sub_7, sub_8, sub_9, where_10, where_11, where_12, where_3, where_4, where_5, where_6, where_7, where_8, where_9 # x_1 => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=4] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 6.0), kwargs = {}) # %add_2 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 5.5), kwargs = {}) # %floor : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_2,), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 12), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 6.0), kwargs = {}) # %add_3 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 5.5), kwargs = {}) # %floor_1 : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_3,), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 12), kwargs = {}) # %logical_and : [num_users=1] = 
call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_2), kwargs = {}) # %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_1, %logical_and), kwargs = {}) # %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_2, %where_1]), kwargs = {}) # %add_4 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor, 1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_2), kwargs = {}) # %add_5 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor_1, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_3), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %mul_5, %full_default_2), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_3), kwargs = {}) # %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 12), kwargs = {}) # %ge_3 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_4 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 12), kwargs = {}) # %logical_and_3 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_3, %lt_4), kwargs = {}) # %logical_and_4 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_3, %logical_and_3), kwargs = {}) # %logical_and_5 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_2, %logical_and_4), kwargs = {}) # %convert_element_type_5 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor_1, torch.int64), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %convert_element_type_5, %full_default_4), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.int64), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %convert_element_type_4, %full_default_3), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_5, %where_4]), kwargs = {}) # %sub_4 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %sub_5), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %mul_6, %full_default_5), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_1, %where_6), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %ge_4 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt_5 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 12), kwargs = {}) # %ge_5 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_5, 0), kwargs = {}) # %lt_6 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_5, 12), kwargs = {}) # %logical_and_6 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_5, %lt_6), kwargs = {}) # %logical_and_7 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_5, %logical_and_6), kwargs = {}) # %logical_and_8 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_4, %logical_and_7), kwargs = {}) # %convert_element_type_7 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.int64), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %convert_element_type_7, %full_default_7), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor, torch.int64), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %convert_element_type_6, %full_default_6), kwargs = {}) # %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_8, %where_7]), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_2), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %floor_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %sub_7), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %mul_7, %full_default_8), kwargs = {}) # 
%mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %where_9), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_11), kwargs = {}) # %ge_6 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_7 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 12), kwargs = {}) # %ge_7 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_5, 0), kwargs = {}) # %lt_8 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_5, 12), kwargs = {}) # %logical_and_9 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_7, %lt_8), kwargs = {}) # %logical_and_10 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_7, %logical_and_9), kwargs = {}) # %logical_and_11 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_6, %logical_and_10), kwargs = {}) # %convert_element_type_9 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.int64), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %convert_element_type_9, %full_default_10), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.int64), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_10 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %convert_element_type_8, %full_default_9), kwargs = {}) # %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_11, %where_10]), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %floor_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %sub_9), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %mul_8, %full_default_11), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_3, %where_12), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_12), kwargs = {}) triton_poi_fused_grid_sampler_2d_replication_pad2d_1 = async_compile.triton('triton_poi_fused_grid_sampler_2d_replication_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_replication_pad2d_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_grid_sampler_2d_replication_pad2d_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x5 = xindex x3 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2*x2), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + (2*x2)), xmask, eviction_policy='evict_last') tmp2 = 0.16666666666666666 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 6.0 tmp6 = tmp4 * tmp5 tmp7 = 5.5 tmp8 = tmp6 + tmp7 tmp9 = libdevice.floor(tmp8) tmp10 = 0.0 tmp11 = tmp9 >= tmp10 tmp12 = 12.0 tmp13 = tmp9 < tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp18 = tmp17 * tmp5 tmp19 = tmp18 + tmp7 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20 >= tmp10 tmp22 = tmp20 < tmp12 tmp23 = tmp21 & tmp22 tmp24 = tmp13 & tmp23 tmp25 = tmp11 & tmp24 tmp26 = 1.0 tmp27 = tmp9 + tmp26 tmp28 = tmp27 - tmp8 tmp29 = tmp20 + tmp26 tmp30 = tmp29 - tmp19 tmp31 = tmp28 * tmp30 tmp32 = tl.where(tmp25, tmp31, tmp10) tmp33 = tmp27 >= tmp10 tmp34 = tmp27 < tmp12 tmp35 = tmp34 & tmp23 tmp36 = tmp33 & tmp35 tmp37 = tmp8 - tmp9 tmp38 = tmp37 * tmp30 tmp39 = tl.where(tmp36, tmp38, tmp10) tmp40 = tmp29 >= tmp10 tmp41 = tmp29 < tmp12 tmp42 = tmp40 & tmp41 tmp43 = tmp34 & tmp42 tmp44 = tmp33 & tmp43 tmp45 = tmp19 - tmp20 tmp46 = tmp37 * tmp45 tmp47 = tl.where(tmp44, tmp46, tmp10) tmp48 = tmp13 & tmp42 tmp49 = tmp11 & tmp48 tmp50 = tmp28 * tmp45 tmp51 = tl.where(tmp49, tmp50, tmp10) tmp52 = tmp20.to(tl.int64) tmp53 = tl.full([1], 0, tl.int64) tmp54 = tl.where(tmp25, tmp52, tmp53) tmp55 = tl.full([XBLOCK], 12, tl.int32) tmp56 = tmp54 + tmp55 tmp57 = tmp54 < 0 tmp58 = tl.where(tmp57, tmp56, tmp54) tl.device_assert(((0 <= tmp58) & (tmp58 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp58 < 12") tmp60 = tmp9.to(tl.int64) tmp61 = tl.where(tmp25, tmp60, tmp53) tmp62 = tmp61 + tmp55 tmp63 = tmp61 < 0 tmp64 = tl.where(tmp63, tmp62, tmp61) tl.device_assert(((0 <= tmp64) & (tmp64 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp64 < 12") tmp66 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + 
tmp58) > (0))))) + (((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + tmp58) > (0)))) * ((((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + tmp58) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0))))) + (((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0)))) * ((((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp67 = tl.where(tmp36, tmp52, tmp53) tmp68 = tmp27.to(tl.int64) tmp69 = tl.where(tmp36, tmp68, tmp53) tmp70 = tmp29.to(tl.int64) tmp71 = tl.where(tmp49, tmp70, tmp53) tmp72 = tl.where(tmp49, tmp60, tmp53) tmp73 = tmp66 * tmp32 tmp74 = tmp67 + tmp55 tmp75 = tmp67 < 0 tmp76 = tl.where(tmp75, tmp74, tmp67) tl.device_assert(((0 <= tmp76) & (tmp76 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp76 < 12") tmp78 = tmp69 + tmp55 tmp79 = tmp69 < 0 tmp80 = tl.where(tmp79, tmp78, tmp69) tl.device_assert(((0 <= tmp80) & (tmp80 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp80 < 12") tmp82 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0))))) + (((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0)))) * ((((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0))))) + (((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0)))) * ((((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp83 = tmp82 * tmp39 tmp84 = tmp73 + tmp83 tmp85 = tmp71 + tmp55 tmp86 = tmp71 < 0 tmp87 = tl.where(tmp86, tmp85, tmp71) tl.device_assert(((0 <= tmp87) & (tmp87 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp87 < 12") tmp89 = tmp72 + tmp55 tmp90 = tmp72 < 0 tmp91 = tl.where(tmp90, tmp89, tmp72) tl.device_assert(((0 <= tmp91) & (tmp91 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp91 < 12") tmp93 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0))))) + (((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0)))) * ((((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0))))) + (((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0)))) * ((((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp94 = tmp93 * tmp51 tmp95 = tmp84 + tmp94 tmp96 = tl.where(tmp44, tmp70, tmp53) tmp97 = tl.where(tmp44, tmp68, tmp53) tmp98 = tmp96 + tmp55 tmp99 = tmp96 < 0 tmp100 = tl.where(tmp99, tmp98, tmp96) tl.device_assert(((0 <= tmp100) & (tmp100 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp100 < 12") tmp102 = tmp97 + tmp55 tmp103 = tmp97 < 0 tmp104 = tl.where(tmp103, tmp102, tmp97) tl.device_assert(((0 <= tmp104) & (tmp104 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp104 < 12") tmp106 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0))))) + (((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0)))) * ((((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0))))) 
+ (((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0)))) * ((((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp107 = tmp106 * tmp47 tmp108 = tmp95 + tmp107 tl.store(in_out_ptr0 + (x5), tmp108, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [base_grid], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, 32, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [shift], Original ATen: [aten.randint] buf1 = torch.ops.aten.randint.low(0, 9, [4, 1, 1, 2], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = buf10; del buf10 # reuse buf15 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x_1, grid_sample], Original ATen: [aten.replication_pad2d, aten.grid_sampler_2d] triton_poi_fused_grid_sampler_2d_replication_pad2d_1.run(buf15, buf0, buf2, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 del buf0 del buf2 return (buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
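Editor's note on the constants in the fused grid-sampler kernel above (a gloss, not part of the generated file): with h = w = 4 and pad = 4 the replication-padded image is 12x12, so the align_corners=False unnormalization ix = ((gx + 1) * W - 1) / 2 collapses to 6*gx + 5.5 (the kernel's "* 6.0 + 5.5" sequence), and 0.16666666666666666 is 2 / (h + 2*pad), the normalized width of one padded pixel applied to the integer shift. A quick sanity check in plain Python:

W = 12                         # padded width: h + 2 * pad = 4 + 2 * 4
g = 0.25 + 3.0 * (2.0 / W)     # a normalized coordinate plus an integer shift of 3 pixels
ix = ((g + 1) * W - 1) / 2     # grid_sample unnormalization, align_corners=False
assert abs(ix - (6 * g + 5.5)) < 1e-12   # matches the kernel's tmp4 * 6.0 + 5.5 form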
import torch import torch.nn as nn import torch.nn.functional as F class RandomShiftsAug(nn.Module): def __init__(self, pad): super().__init__() self.pad = pad def forward(self, x): x = x.float() n, _c, h, w = x.size() assert h == w padding = tuple([self.pad] * 4) x = F.pad(x, padding, 'replicate') eps = 1.0 / (h + 2 * self.pad) arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad, device=x.device, dtype=x.dtype)[:h] arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2) base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2) base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1) shift = torch.randint(0, 2 * self.pad + 1, size=(n, 1, 1, 2), device=x.device, dtype=x.dtype) shift *= 2.0 / (h + 2 * self.pad) grid = base_grid + shift return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'pad': 4}]
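For orientation, a minimal usage sketch of the eager module above (illustrative only; shapes follow get_inputs/get_init_inputs):

import torch

aug = RandomShiftsAug(pad=4)
x = torch.rand(4, 4, 4, 4)   # (n, c, h, w) with h == w, as the assert requires
y = aug(x)                   # each image gets its own random integer shift
print(y.shape)               # torch.Size([4, 4, 4, 4]) -- shifting preserves shape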
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = 6.0 tmp8 = tmp6 < tmp7 tmp9 = 0.16666666666666666 tmp10 = tmp6 * tmp9 tmp11 = -0.9166666666666666 tmp12 = tmp10 + tmp11 tmp13 = 11 + -1 * x1 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 * tmp9 tmp16 = 0.9166666666666666 tmp17 = tmp16 - tmp15 tmp18 = tl.where(tmp8, tmp12, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp24 = x2 tmp25 = tmp24.to(tl.float32) tmp26 = tmp25 < tmp7 tmp27 = tmp25 * tmp9 tmp28 = tmp27 + tmp11 tmp29 = 11 + -1 * x2 tmp30 = tmp29.to(tl.float32) tmp31 = tmp30 * tmp9 tmp32 = tmp16 - tmp31 tmp33 = tl.where(tmp26, tmp28, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp21, tmp33, tmp34) tmp36 = tl.where(tmp4, tmp20, tmp35) tl.store(out_ptr0 + x4, tmp36, xmask) @triton.jit def triton_poi_fused_grid_sampler_2d_replication_pad2d_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x5 = xindex x3 = xindex // 16 tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last' ) tmp2 = 0.16666666666666666 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 6.0 tmp6 = tmp4 * tmp5 tmp7 = 5.5 tmp8 = tmp6 + tmp7 tmp9 = libdevice.floor(tmp8) tmp10 = 0.0 tmp11 = tmp9 >= tmp10 tmp12 = 12.0 tmp13 = tmp9 < tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp18 = tmp17 * tmp5 tmp19 = tmp18 + tmp7 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20 >= tmp10 tmp22 = tmp20 < tmp12 tmp23 = tmp21 & tmp22 tmp24 = tmp13 & tmp23 tmp25 = tmp11 & tmp24 tmp26 = 1.0 tmp27 = tmp9 + tmp26 tmp28 = tmp27 - tmp8 tmp29 = tmp20 + tmp26 tmp30 = tmp29 - tmp19 tmp31 = tmp28 * tmp30 tmp32 = tl.where(tmp25, tmp31, tmp10) tmp33 = tmp27 >= tmp10 tmp34 = tmp27 < tmp12 tmp35 = tmp34 & tmp23 tmp36 = tmp33 & tmp35 tmp37 = tmp8 - tmp9 tmp38 = tmp37 * tmp30 tmp39 = tl.where(tmp36, tmp38, tmp10) tmp40 = tmp29 >= tmp10 tmp41 = tmp29 < tmp12 tmp42 = tmp40 & tmp41 tmp43 = tmp34 & tmp42 tmp44 = tmp33 & tmp43 tmp45 = tmp19 - tmp20 tmp46 = tmp37 * tmp45 tmp47 = tl.where(tmp44, tmp46, tmp10) tmp48 = tmp13 & tmp42 tmp49 = tmp11 & tmp48 tmp50 = tmp28 * tmp45 tmp51 = tl.where(tmp49, tmp50, tmp10) tmp52 = tmp20.to(tl.int64) tmp53 = tl.full([1], 0, tl.int64) tmp54 = tl.where(tmp25, tmp52, tmp53) tmp55 = tl.full([XBLOCK], 12, tl.int32) tmp56 = tmp54 + tmp55 tmp57 = tmp54 < 0 tmp58 = tl.where(tmp57, tmp56, tmp54) tl.device_assert((0 <= tmp58) & 
(tmp58 < 12) | ~xmask, 'index out of bounds: 0 <= tmp58 < 12') tmp60 = tmp9.to(tl.int64) tmp61 = tl.where(tmp25, tmp60, tmp53) tmp62 = tmp61 + tmp55 tmp63 = tmp61 < 0 tmp64 = tl.where(tmp63, tmp62, tmp61) tl.device_assert((0 <= tmp64) & (tmp64 < 12) | ~xmask, 'index out of bounds: 0 <= tmp64 < 12') tmp66 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0)) + (0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0)) * (0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0)) + (0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0)) * (0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0) < 3))), xmask, eviction_policy='evict_last') tmp67 = tl.where(tmp36, tmp52, tmp53) tmp68 = tmp27.to(tl.int64) tmp69 = tl.where(tmp36, tmp68, tmp53) tmp70 = tmp29.to(tl.int64) tmp71 = tl.where(tmp49, tmp70, tmp53) tmp72 = tl.where(tmp49, tmp60, tmp53) tmp73 = tmp66 * tmp32 tmp74 = tmp67 + tmp55 tmp75 = tmp67 < 0 tmp76 = tl.where(tmp75, tmp74, tmp67) tl.device_assert((0 <= tmp76) & (tmp76 < 12) | ~xmask, 'index out of bounds: 0 <= tmp76 < 12') tmp78 = tmp69 + tmp55 tmp79 = tmp69 < 0 tmp80 = tl.where(tmp79, tmp78, tmp69) tl.device_assert((0 <= tmp80) & (tmp80 < 12) | ~xmask, 'index out of bounds: 0 <= tmp80 < 12') tmp82 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0)) + (0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0)) * (0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0)) + (0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0)) * (0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0) < 3))), xmask, eviction_policy='evict_last') tmp83 = tmp82 * tmp39 tmp84 = tmp73 + tmp83 tmp85 = tmp71 + tmp55 tmp86 = tmp71 < 0 tmp87 = tl.where(tmp86, tmp85, tmp71) tl.device_assert((0 <= tmp87) & (tmp87 < 12) | ~xmask, 'index out of bounds: 0 <= tmp87 < 12') tmp89 = tmp72 + tmp55 tmp90 = tmp72 < 0 tmp91 = tl.where(tmp90, tmp89, tmp72) tl.device_assert((0 <= tmp91) & (tmp91 < 12) | ~xmask, 'index out of bounds: 0 <= tmp91 < 12') tmp93 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0)) + (0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0)) * (0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0)) + (0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0)) * (0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0) < 3))), xmask, eviction_policy='evict_last') tmp94 = tmp93 * tmp51 tmp95 = tmp84 + tmp94 tmp96 = tl.where(tmp44, tmp70, tmp53) tmp97 = tl.where(tmp44, tmp68, tmp53) tmp98 = tmp96 + tmp55 tmp99 = tmp96 < 0 tmp100 = tl.where(tmp99, tmp98, tmp96) tl.device_assert((0 <= tmp100) & (tmp100 < 12) | ~xmask, 'index out of bounds: 0 <= tmp100 < 12') tmp102 = tmp97 + tmp55 tmp103 = tmp97 < 0 tmp104 = tl.where(tmp103, tmp102, tmp97) tl.device_assert((0 <= tmp104) & (tmp104 < 12) | ~xmask, 'index out of bounds: 0 <= tmp104 < 12') tmp106 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0)) + (0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0)) * (0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) + (0 * (0 >= -4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) * (0 * (0 >= -4 + tmp104) + 
(-4 + tmp104) * (-4 + tmp104 > 0) < 3))), xmask, eviction_policy='evict_last') tmp107 = tmp106 * tmp47 tmp108 = tmp95 + tmp107 tl.store(in_out_ptr0 + x5, tmp108, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) buf1 = torch.ops.aten.randint.low(0, 9, [4, 1, 1, 2], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = buf10 del buf10 buf15 = buf11 del buf11 triton_poi_fused_grid_sampler_2d_replication_pad2d_1[grid(256)](buf15, buf0, buf2, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf0 del buf2 return buf15, class RandomShiftsAugNew(nn.Module): def __init__(self, pad): super().__init__() self.pad = pad def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
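The compiled wrapper can be driven the same way, with the caveat that h = w = 4 and pad = 4 are baked into the generated kernels (the 12x12 padded grid above) and call() asserts a contiguous CUDA tensor of exactly that shape. A hedged sketch, assuming a CUDA device is available:

x = torch.rand(4, 4, 4, 4, device='cuda')
aug = RandomShiftsAugNew(pad=4)   # pad is stored, but the kernels assume pad=4, h=w=4
y = aug(x)                        # the shift is drawn inside call() via aten.randint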
rajeswar18/url_benchmark
RandomShiftsAug
false
16313
[ "MIT" ]
180
2fdfd82a9067222106ef7627f71b1e1ae5d70a85
https://github.com/rajeswar18/url_benchmark/tree/2fdfd82a9067222106ef7627f71b1e1ae5d70a85
L2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/re/creegezzx7dnggpoax3tzz6nq4bsjbvze4pnsihzcwedlmlzri2l.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] # Source node to ATen node mapping: # x => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp0 / tmp15 tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init import torch.nn class L2Norm(nn.Module): def __init__(self): super(L2Norm, self).__init__() self.eps = 1e-10 def forward(self, x): norm = torch.sqrt(torch.abs(torch.sum(x * x, dim=1)) + self.eps) x = x / norm.unsqueeze(1).expand_as(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
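A minimal usage sketch of the eager module (illustrative): after the division, every spatial position should have unit norm across the channel dimension.

import torch

m = L2Norm()
x = torch.rand(4, 4, 4, 4)
y = m(x)
print(y.pow(2).sum(dim=1).sqrt())   # ~1.0 at every (n, h, w) position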
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.init import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp0 / tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class L2NormNew(nn.Module): def __init__(self): super(L2NormNew, self).__init__() self.eps = 1e-10 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
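Since the fused division kernel is deterministic, the compiled module can be checked directly against the eager one; a sketch assuming a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = L2Norm()(x)
fused = L2NormNew()(x)
print(torch.allclose(ref, fused, atol=1e-6))   # expected: True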
rdguez-mariano/affnet
L2Norm
false
16314
[ "MIT" ]
211
a3f0bb32d9001d1daf024f38d29867f37816ea78
https://github.com/rdguez-mariano/affnet/tree/a3f0bb32d9001d1daf024f38d29867f37816ea78
GlobalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ip/cip3p4ibqio6uu76ccsemd7wjusq5ptlow3dt2zxzouyuz2sqywf.py # Topologically Sorted Source Nodes: [concat_c], Original ATen: [aten.cat] # Source node to ATen node mapping: # concat_c => cat # Graph fragment: # %cat : 
[num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f5/cf5pnuv5il7avsmzck3quom7r6zvcfuulsdwpzlv2epzfmcgqgwb.py # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_h_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x3), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u4/cu4fypgfipklcxtitafatnyqdaatx5tws6qfndqotcy4qivcph6d.py # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # align_vectors_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [concat_c], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attn_h], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0) del buf2 return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
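The two _softmax kernels above implement the standard numerically stable split: kernel 0 subtracts the per-row max and exponentiates, kernel 1 divides by the row sum. In eager terms:

import torch

x = torch.rand(16, 4)
e = (x - x.max(dim=1, keepdim=True).values).exp()   # triton_poi_fused__softmax_0
y = e / e.sum(dim=1, keepdim=True)                  # triton_poi_fused__softmax_1
print(torch.allclose(y, torch.softmax(x, dim=1)))   # expected: True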
import torch import torch.nn as nn def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, nn.Linear): pass class GlobalAttention(nn.Module): """ Luong Attention. Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. H_1 H_2 H_3 ... H_n q q q q | | | | \\ | | / ..... \\ | / a Constructs a unit mapping. $$(H_1 + H_n, q) => (a)$$ Where H is of `batch x n x dim` and q is of `batch x dim`. Luong Attention (dot, general): The full function is $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$. * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$ * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$ Bahdanau Attention (mlp): $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$. The Alignment-function $$a$$ computes an alignment as: $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$. """ def __init__(self, dim, is_transform_out, attn_type='dot', attn_hidden=0): super(GlobalAttention, self).__init__() self.dim = dim self.attn_type = attn_type self.attn_hidden = attn_hidden assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' if attn_hidden > 0: self.transform_in = nn.Sequential(nn.Linear(dim, attn_hidden), nn.ELU(0.1)) if self.attn_type == 'general': d = attn_hidden if attn_hidden > 0 else dim self.linear_in = nn.Linear(d, d, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' if is_transform_out: self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) else: self.linear_out = None self.sm = nn.Softmax() self.tanh = nn.Tanh() self.mask = None def applyMask(self, mask): self.mask = mask def applyMaskBySeqBatch(self, q): self.applyMask(q.data.eq(table.IO.PAD).t().contiguous().unsqueeze(0)) def score(self, h_t, h_s): """ h_t (FloatTensor): batch x tgt_len x dim h_s (FloatTensor): batch x src_len x dim returns scores (FloatTensor): batch x tgt_len x src_len: raw attention scores for each src index """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_hidden > 0: h_t = self.transform_in(h_t) h_s = self.transform_in(h_s) if self.attn_type == 'general': h_t = self.linear_in(h_t) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input, context): """ input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states """ if input.dim() == 2: one_step = True input = input.unsqueeze(1) else: one_step = False batch, sourceL, dim = context.size() batch_, targetL, dim_ = input.size() aeq(batch, batch_) aeq(dim, dim_) aeq(self.dim, dim) if self.mask is not None: beam_, batch_, sourceL_ = self.mask.size() aeq(batch, batch_ * beam_) aeq(sourceL, sourceL_) align = self.score(input, context) if self.mask is not None: mask_ = self.mask.view(batch, 1, sourceL) align.data.masked_fill_(mask_, -float('inf')) align_vectors = self.sm(align.view(batch * targetL, sourceL)) align_vectors = align_vectors.view(batch, targetL, sourceL) c = torch.bmm(align_vectors, context) concat_c = torch.cat([c, input], 2) if self.linear_out is None: attn_h = concat_c else: attn_h = self.linear_out(concat_c) if self.attn_type in ['general', 'dot']: attn_h = self.tanh(attn_h) if one_step: attn_h = attn_h.squeeze(1) align_vectors = align_vectors.squeeze(1) batch_, dim_ = attn_h.size() aeq(batch, batch_) batch_, sourceL_ = align_vectors.size() aeq(batch, batch_) aeq(sourceL, sourceL_) else: attn_h = attn_h.transpose(0, 1).contiguous() align_vectors = align_vectors.transpose(0, 1).contiguous() targetL_, batch_, dim_ = attn_h.size() aeq(targetL, targetL_) aeq(batch, batch_) targetL_, batch_, sourceL_ = align_vectors.size() aeq(targetL, targetL_) aeq(batch, batch_) aeq(sourceL, sourceL_) return attn_h, align_vectors def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'is_transform_out': 4}]
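A minimal usage sketch of the eager module above (illustrative; note that nn.Softmax() without an explicit dim emits a deprecation warning on recent PyTorch, and the multi-step path returns time-major tensors):

import torch

attn = GlobalAttention(dim=4, is_transform_out=True)
inp = torch.rand(4, 4, 4)   # batch x tgt_len x dim (decoder states)
ctx = torch.rand(4, 4, 4)   # batch x src_len x dim (encoder states)
attn_h, align = attn(inp, ctx)
print(attn_h.shape, align.shape)   # (tgt_len, batch, dim), (tgt_len, batch, src_len)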
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x3, tmp1, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5 def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, 
nn.Linear): pass class GlobalAttentionNew(nn.Module): """ Luong Attention. Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. H_1 H_2 H_3 ... H_n q q q q | | | | \\ | | / ..... \\ | / a Constructs a unit mapping. $$(H_1 + H_n, q) => (a)$$ Where H is of `batch x n x dim` and q is of `batch x dim`. Luong Attention (dot, general): The full function is $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$. * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$ * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$ Bahdanau Attention (mlp): $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$. The Alignment-function $$a$$ computes an alignment as: $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j) )$$. """ def __init__(self, dim, is_transform_out, attn_type='dot', attn_hidden=0): super(GlobalAttentionNew, self).__init__() self.dim = dim self.attn_type = attn_type self.attn_hidden = attn_hidden assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' if attn_hidden > 0: self.transform_in = nn.Sequential(nn.Linear(dim, attn_hidden), nn.ELU(0.1)) if self.attn_type == 'general': d = attn_hidden if attn_hidden > 0 else dim self.linear_in = nn.Linear(d, d, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' if is_transform_out: self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) else: self.linear_out = None self.sm = nn.Softmax() self.tanh = nn.Tanh() self.mask = None def applyMask(self, mask): self.mask = mask def applyMaskBySeqBatch(self, q): self.applyMask(q.data.eq(table.IO.PAD).t().contiguous().unsqueeze(0)) def score(self, h_t, h_s): """ h_t (FloatTensor): batch x tgt_len x dim h_s (FloatTensor): batch x src_len x dim returns scores (FloatTensor): batch x tgt_len x src_len: raw attention scores for each src index """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_hidden > 0: h_t = self.transform_in(h_t) h_s = self.transform_in(h_s) if self.attn_type == 'general': h_t = self.linear_in(h_t) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input_0, input_1): primals_3 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
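For orientation (an editorial sketch, not part of the record): the two triton_poi_fused__softmax_* kernels above split a row-wise softmax over the (16, 4) score matrix into the standard numerically stable two passes, first subtracting the row maximum and exponentiating, then normalizing by the row sum. A minimal eager-PyTorch sketch of the same decomposition:

import torch

def two_pass_softmax(x: torch.Tensor) -> torch.Tensor:
    # Pass 1 (mirrors triton_poi_fused__softmax_0): shift by the row max so
    # exp() cannot overflow, then exponentiate.
    e = torch.exp(x - x.max(dim=-1, keepdim=True).values)
    # Pass 2 (mirrors triton_poi_fused__softmax_1): normalize by the row sum.
    return e / e.sum(dim=-1, keepdim=True)

x = torch.randn(16, 4)
assert torch.allclose(two_pass_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)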
rajasagashe/coarse2fine
GlobalAttention
false
16,315
[ "MIT" ]
164
d6c51a3073df9018e32c95c257c68b0d69d9aa46
https://github.com/rajasagashe/coarse2fine/tree/d6c51a3073df9018e32c95c257c68b0d69d9aa46
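A hypothetical smoke test for the GlobalAttentionNew wrapper in the record above (assumes a CUDA device; the (4, 4, 4) shapes and the bias-free (4, 8) linear_out weight come from the assert_size_stride checks in call, which dim=4, attn_type='dot', is_transform_out=True reproduce):

import torch

model = GlobalAttentionNew(dim=4, is_transform_out=True, attn_type='dot').cuda()
q = torch.rand(4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, device='cuda')
out, attn = model(q, h)       # out: tanh(linear_out([context, q])), attn: softmax scores
print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) for both
# attention weights are normalized over the source dimension
assert torch.allclose(attn.sum(-1), torch.ones(4, 4, device='cuda'), atol=1e-5)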
LearnableTimeDepWeightedCost
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ar/car4w6k5xtvxdgstpntr74b3lsmdap665zjhk74ct4st3net2y3x.py # Topologically Sorted Source Nodes: [sub, pow_1, mse, wmse, mean], Original ATen: [aten.sub, aten.pow, aten.squeeze, aten.mul, aten.mean] # Source node to ATen node mapping: # mean => mean # mse => squeeze # pow_1 => pow_1 # sub => sub # wmse => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %slice_3), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %squeeze : [num_users=2] = call_function[target=torch.ops.aten.squeeze.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_mean_mul_pow_squeeze_sub_0 = async_compile.triton('triton_per_fused_mean_mul_pow_squeeze_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_squeeze_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_pow_squeeze_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r0), rmask, other=0.0) tmp4 = tl.load(in_ptr2 + (r0), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp3 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 36.0 tmp11 = tmp9 / tmp10 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp3, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 9), (9, 1)) assert_size_stride(primals_2, (4, 9), (9, 1)) assert_size_stride(primals_3, (4, 9), (9, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 9), (9, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, mse, wmse, mean], Original ATen: [aten.sub, aten.pow, aten.squeeze, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_pow_squeeze_sub_0.run(buf2, primals_1, primals_2, primals_3, buf0, 1, 36, grid=grid(1), stream=stream0) del primals_1 del primals_2 del primals_3 return (buf2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 9), (9, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 9), (9, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 9), (9, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
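An eager-mode reference for the single persistent-reduction kernel above (editorial sketch; names are illustrative): it fuses subtract, square, weight multiply, and mean into one pass over all rnumel = 36 (= 4 x 9) elements, writing the elementwise squared error to buf0 for the backward pass and the scalar mean to buf2.

import torch

def weighted_mse_reference(y_in, y_target, weights):
    mse = (y_in - y_target) ** 2    # elementwise squared error (kernel's out_ptr0 / buf0)
    return (mse * weights).mean()   # masked row sum / 36.0 (kernel's in_out_ptr0 / buf2)

print(weighted_mse_reference(torch.rand(4, 9), torch.rand(4, 9), 0.01 * torch.ones(4, 9)))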
import torch import torch.utils.data class LearnableTimeDepWeightedCost(torch.nn.Module): def __init__(self, time_horizon, dim=9, weights=None): super(LearnableTimeDepWeightedCost, self).__init__() if weights is None: self.weights = torch.nn.Parameter(0.01 * torch.ones([ time_horizon, dim])) else: self.weights = weights self.clip = torch.nn.ReLU() self.dim = dim self.meta_grads = [[] for _, _ in enumerate(self.parameters())] def forward(self, y_in, y_target): assert y_in.dim() == 2 mse = ((y_in[:, -self.dim:] - y_target[-self.dim:]) ** 2).squeeze() wmse = mse * self.weights return wmse.mean() def get_inputs(): return [torch.rand([4, 9]), torch.rand([4, 9])] def get_init_inputs(): return [[], {'time_horizon': 4}]
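Since the point of the module is that the cost weights are themselves trainable, a brief usage sketch (editorial, reusing the record's own get_inputs helper): the scalar loss backpropagates into self.weights, which an outer meta-optimizer could then update.

import torch

cost = LearnableTimeDepWeightedCost(time_horizon=4)
y_in, y_target = get_inputs()
loss = cost(y_in, y_target)
loss.backward()
print(cost.weights.grad.shape)  # torch.Size([4, 9])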
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_squeeze_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp1 = tl.load(in_ptr1 + r0, rmask, other=0.0) tmp4 = tl.load(in_ptr2 + r0, rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp3 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 36.0 tmp11 = tmp9 / tmp10 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp3, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 9), (9, 1)) assert_size_stride(primals_2, (4, 9), (9, 1)) assert_size_stride(primals_3, (4, 9), (9, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 9), (9, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_mean_mul_pow_squeeze_sub_0[grid(1)](buf2, primals_1, primals_2, primals_3, buf0, 1, 36, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 return buf2, buf0 class LearnableTimeDepWeightedCostNew(torch.nn.Module): def __init__(self, time_horizon, dim=9, weights=None): super(LearnableTimeDepWeightedCostNew, self).__init__() if weights is None: self.weights = torch.nn.Parameter(0.01 * torch.ones([ time_horizon, dim])) else: self.weights = weights self.clip = torch.nn.ReLU() self.dim = dim self.meta_grads = [[] for _, _ in enumerate(self.parameters())] def forward(self, input_0, input_1): primals_1 = input_0 primals_2 = input_1 primals_3 = self.weights output = call([primals_1, primals_2, primals_3]) return output[0]
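A matching smoke test for the compiled wrapper (editorial; assumes a CUDA device, since call pins all buffers to cuda:0). Per the graph fragment in the record's original_triton_code, the kernel computes ((in_ptr0 - in_ptr1) ** 2 * in_ptr2).mean(), so call expects y_in, y_target, weights in that order; only the scalar loss is returned, while buf0 is retained internally for autograd.

import torch

cost = LearnableTimeDepWeightedCostNew(time_horizon=4).cuda()
y_in = torch.rand(4, 9, device='cuda')
y_target = torch.rand(4, 9, device='cuda')
loss = cost(y_in, y_target)  # scalar, same value the eager module produces
print(float(loss))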
ricklentz/LearningToLearn
LearnableTimeDepWeightedCost
false
16,317
[ "MIT" ]
76
fa32b98b40402fa15982b450ed09d9d3735ec924
https://github.com/ricklentz/LearningToLearn/tree/fa32b98b40402fa15982b450ed09d9d3735ec924
CmapPafHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ca/ccalm3hakjc3y6pyouzotjhedyolmiuxj66frcn7j2bllcgh7gjc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + 
(1048576*y1)), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4q/c4qmtshyh2quqp4mfbem56kakgs5tn4mkrnwi3suy4oa6joff42a.py # Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul] # Source node to ATen node mapping: # ac => sigmoid # ap => tanh # conv2d => convolution # conv2d_1 => convolution_1 # mul => mul # mul_1 => mul_1 # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=2] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %tanh), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x2), None) tmp4 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp5) tmp8 = tmp6 * tmp7 tmp9 = libdevice.tanh(tmp2) tmp10 = tmp6 * tmp9 tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(in_out_ptr1 + (x2), tmp5, None) tl.store(out_ptr0 + (x2), tmp8, None) tl.store(out_ptr1 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pn/cpngdclw7lzjwmfou3d3kip6qoxuvwgrgxlt5gs2kd5udjsfz5ny.py # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1024, 4096, grid=grid(1024, 4096), stream=stream0) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_4, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), 
(1048576, 1, 16384, 256)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5; del buf5 # reuse buf4 = buf3; del buf3 # reuse buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) # Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul] triton_poi_fused_convolution_mul_sigmoid_tanh_2.run(buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, grid=grid(4194304), stream=stream0) del primals_3 del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4)) buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf8, primals_7, buf9, 16, 4096, grid=grid(16, 4096), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4)) buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf11, primals_9, buf12, 16, 4096, grid=grid(16, 4096), stream=stream0) del buf11 del primals_9 return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6, buf7, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
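A side note on the first two kernels in this record (editorial): triton_poi_fused_0 and triton_poi_fused_1 only repack the contiguous NCHW activations and the 3x3 weights into channels-last strides such as (1048576, 1, 16384, 256) before the external convolution is called. The eager-mode equivalent of that layout change:

import torch

x = torch.rand(4, 256, 64, 64)                        # NCHW, strides (1048576, 4096, 64, 1)
xl = x.contiguous(memory_format=torch.channels_last)  # strides (1048576, 1, 16384, 256)
print(xl.stride())  # matches the strides buf0 is allocated with in call()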
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadAttention(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadAttention, self).__init__() self.cmap_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.paf_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.cmap_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): xc = self.cmap_up(x) ac = torch.sigmoid(self.cmap_att(xc)) xp = self.paf_up(x) ap = torch.tanh(self.paf_att(xp)) return self.cmap_conv(xc * ac), self.paf_conv(xp * ap) def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
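The fused kernel triton_poi_fused_convolution_mul_sigmoid_tanh_2 folds both gate epilogues into one pass: it adds the conv biases, applies sigmoid on the cmap branch and tanh on the paf branch, and multiplies each gate into the shared input (with num_upsample=0 the cmap_up/paf_up blocks are empty, so xc == xp == x). A hedged eager sketch of that single fused step (names are illustrative):

import torch

def fused_gate_epilogue(x, cmap_pre, paf_pre, cmap_bias, paf_bias):
    # cmap_pre / paf_pre are the bias-free conv outputs (buf4 / buf6 before
    # the kernel runs); the kernel adds the biases and forms both gates.
    ac = torch.sigmoid(cmap_pre + cmap_bias.view(1, -1, 1, 1))
    ap = torch.tanh(paf_pre + paf_bias.view(1, -1, 1, 1))
    return x * ac, x * ap  # the tensors written to buf7 and buf10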
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x2, None) tmp4 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp5) tmp8 = tmp6 * tmp7 tmp9 = libdevice.tanh(tmp2) tmp10 = tmp6 * tmp9 tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(in_out_ptr1 + x2, tmp5, None) tl.store(out_ptr0 + x2, tmp8, None) tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (256,), (1,)) 
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_4, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5 del buf5 buf4 = buf3 del buf3 buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) triton_poi_fused_convolution_mul_sigmoid_tanh_2[grid(4194304)](buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, XBLOCK= 512, num_warps=8, num_stages=1) del primals_3 del primals_5 buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4)) buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_3[grid(16, 4096)](buf8, primals_7, buf9, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4)) buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0) del buf8 triton_poi_fused_convolution_3[grid(16, 4096)](buf11, primals_9, buf12, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del buf11 del primals_9 return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6, buf7, buf10) class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. 
BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadAttentionNew(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadAttentionNew, self).__init__() self.cmap_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.paf_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.cmap_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_2 = self.cmap_att.weight primals_3 = self.cmap_att.bias primals_4 = self.paf_att.weight primals_5 = self.paf_att.bias primals_6 = self.cmap_conv.weight primals_7 = self.cmap_conv.bias primals_8 = self.paf_conv.weight primals_9 = self.paf_conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
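A hypothetical smoke test for the compiled head (assumes a CUDA device; the (4, 256, 64, 64) input comes from call's assert on primals_1, and with num_upsample=0 the unused up blocks do not affect the result):

import torch

head = CmapPafHeadAttentionNew(input_channels=4, cmap_channels=4, paf_channels=4).cuda()
x = torch.rand(4, 256, 64, 64, device='cuda')
cmap, paf = head(x)
print(cmap.shape, paf.shape)  # torch.Size([4, 4, 64, 64]) for both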
quantd2/trt_pose
CmapPafHeadAttention
false
16,318
[ "MIT" ]
738
44c5e826977f20c8dad2d9725313a18cb2189853
https://github.com/quantd2/trt_pose/tree/44c5e826977f20c8dad2d9725313a18cb2189853
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wl/cwl3r3y2pgt376us4loda5kjeqzzwgynhcjamefpeshkd44ofpoz.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_9, 1.0), kwargs = {}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py # Topologically Sorted Source Nodes: [], Original ATen: [] 
# Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = 
{}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ym/cym2aoh2473azgl5qufcsfqa4qgp5bji7hptnotbmkegfc7h3xan.py # Topologically Sorted Source Nodes: [contiguous_2, view_5], Original ATen: [aten.clone, aten.view] # Source node to ATen node mapping: # contiguous_2 => clone # view_5 => view_20 # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) # %view_20 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [-1, 4]), kwargs = {}) triton_poi_fused_clone_view_3 = async_compile.triton('triton_poi_fused_clone_view_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_view_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), 
primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf3, 64, grid=grid(64), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2, view_5], Original ATen: [aten.clone, aten.view] triton_poi_fused_clone_view_3.run(buf8, buf9, 16, 4, grid=grid(16, 4), stream=stream0) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 4), (4, 1), 0), out=buf10) return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), primals_1, buf7, reinterpret_tensor(buf0, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
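One detail worth flagging in triton_poi_fused_2 above (editorial note): it implements the safe-softmax convention for fully masked rows. If every score in a row is -inf, a plain softmax yields NaN (0/0), so the kernel detects such rows through the fused eq/logical_not/any chain and writes zeros instead, matching attnc[mask] = 0 in the eager module. An illustrative eager check:

import torch

scores = torch.full((1, 4), float('-inf'))    # a fully masked query row
naive = torch.softmax(scores, dim=-1)         # all NaN
row_alive = (scores != float('-inf')).any(dim=-1, keepdim=True)
safe = torch.where(row_alive, naive, torch.zeros_like(naive))
print(naive, safe)  # tensor([[nan, nan, nan, nan]]) vs tensor([[0., 0., 0., 0.]])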
import math
import torch
import numpy as np
from torch import nn


class MultiHeadAttention(nn.Module):

    def __init__(self, n_heads, input_dim, embed_dim, val_dim=None,
            key_dim=None):
        super(MultiHeadAttention, self).__init__()
        if val_dim is None:
            val_dim = embed_dim // n_heads
        if key_dim is None:
            key_dim = val_dim
        self.n_heads = n_heads
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.val_dim = val_dim
        self.key_dim = key_dim
        self.norm_factor = 1 / math.sqrt(key_dim)
        self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
        self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))
        self.init_parameters()

    def init_parameters(self):
        for param in self.parameters():
            stdv = 1.0 / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, q, h=None, mask=None):
        """
        :param q: queries (batch_size, n_query, input_dim)
        :param h: data (batch_size, graph_size, input_dim)
        :param mask: mask (batch_size, n_query, graph_size), or viewable as
            that shape (i.e. can be 2-dim if n_query == 1); contains 1 where
            attention is not possible (i.e. the mask is a negative adjacency)
        :return:
        """
        if h is None:
            h = q
        batch_size, graph_size, input_dim = h.size()
        n_query = q.size(1)
        assert q.size(0) == batch_size
        assert q.size(2) == input_dim
        assert input_dim == self.input_dim, 'Wrong embedding dimension of input'
        hflat = h.contiguous().view(-1, input_dim)
        qflat = q.contiguous().view(-1, input_dim)
        shp = self.n_heads, batch_size, graph_size, -1
        shp_q = self.n_heads, batch_size, n_query, -1
        V = torch.matmul(hflat, self.W_val).view(shp)
        if graph_size > 1:
            Q = torch.matmul(qflat, self.W_query).view(shp_q)
            K = torch.matmul(hflat, self.W_key).view(shp)
            compatibility = self.norm_factor * torch.matmul(Q,
                K.transpose(2, 3))
            if mask is not None:
                mask = mask.view(1, batch_size, n_query, graph_size
                    ).expand_as(compatibility)
                compatibility[mask] = -np.inf
            attn = torch.softmax(compatibility, dim=-1)
            if mask is not None:
                attnc = attn.clone()
                attnc[mask] = 0
                attn = attnc
            heads = torch.matmul(attn, V)
        else:
            heads = V.repeat(1, 1, n_query, 1)
        out = torch.mm(heads.permute(1, 2, 0, 3).contiguous().view(-1,
            self.n_heads * self.val_dim), self.W_out.view(-1, self.embed_dim)
            ).view(batch_size, n_query, self.embed_dim)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'input_dim': 4, 'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_view_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = 
yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_0[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf2 triton_poi_fused_0[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_clone_view_3[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 4), (4, 1 ), 0), out=buf10) return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), primals_1, buf7, reinterpret_tensor(buf0, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor( primals_5, (4, 4), (1, 4), 0) class MultiHeadAttentionNew(nn.Module): def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim =None): super(MultiHeadAttentionNew, self).__init__() if val_dim is None: val_dim = embed_dim // n_heads if key_dim is None: key_dim = val_dim self.n_heads = n_heads self.input_dim = input_dim self.embed_dim = embed_dim self.val_dim = val_dim self.key_dim = key_dim self.norm_factor = 1 / math.sqrt(key_dim) self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim)) self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim)) self.init_parameters() def init_parameters(self): for param in 
self.parameters(): stdv = 1.0 / math.sqrt(param.size(-1)) param.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.W_query primals_3 = self.W_key primals_4 = self.W_val primals_5 = self.W_out primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
rdjdejong/attention-learn-to-route
MultiHeadAttention
false
16319
[ "MIT" ]
540
3b6bbdad677a36df53eabad98b48f436be298ac8
https://github.com/rdjdejong/attention-learn-to-route/tree/3b6bbdad677a36df53eabad98b48f436be298ac8
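The record above pairs the eager MultiHeadAttention with its Inductor lowering, MultiHeadAttentionNew, whose forward simply feeds the module's weights and the input into the generated call(). A minimal parity check, assuming both class definitions from the record are already in scope and a CUDA device is available (the generated kernels are CUDA-only):

import torch

# Sketch: run the eager module and the lowered module on the same input.
# load_state_dict works because both classes expose the same parameter
# names (W_query, W_key, W_val, W_out).
torch.manual_seed(0)
eager = MultiHeadAttention(n_heads=4, input_dim=4, embed_dim=4).cuda()
lowered = MultiHeadAttentionNew(n_heads=4, input_dim=4, embed_dim=4).cuda()
lowered.load_state_dict(eager.state_dict())

q = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    out_eager = eager(q)      # pure-PyTorch reference path
    out_lowered = lowered(q)  # generated Triton / extern-kernel path

# The two paths should agree to float32 tolerance.
print(torch.allclose(out_eager, out_lowered, atol=1e-5))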
LocalNorm2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nf/cnfwjpjms2qnchy6s44kg5itdmzfj527avquk7lfljb2ontjhe6a.py # Topologically Sorted Source Nodes: [pad, mul, pad_1], Original ATen: [aten.reflection_pad2d, aten.mul] # Source node to ATen node mapping: # mul => mul # pad => _unsafe_index, _unsafe_index_1 # pad_1 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mul, [None, None, %sub_6, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_8]), kwargs = {}) triton_poi_fused_mul_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_mul_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': 
False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 147456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 96 x1 = (xindex // 96) % 96 x2 = (xindex // 9216) x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-16) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-16) + x1))))) + (4096*x2)), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + (x3), tmp0, None) tl.store(out_ptr1 + (x3), tmp1, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ra/craat4ivmkzxkvqicswz3b75qo5ol23dv5vmq3zr4jii2tr2sxrx.py # Topologically Sorted Source Nodes: [sub, mul_1, sub_1, abs_1, sqrt, add, truediv, clamp], Original ATen: [aten.sub, aten.mul, aten.abs, aten.sqrt, aten.add, aten.div, aten.clamp] # Source node to ATen node mapping: # abs_1 => abs_9 # add => add # clamp => clamp_max, clamp_min # mul_1 => mul_1 # sqrt => sqrt # sub => sub_4 # sub_1 => sub_9 # truediv => div # Graph fragment: # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %avg_pool2d), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, %avg_pool2d), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_1, %mul_1), kwargs = {}) # %abs_9 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_9,), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%abs_9,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %add), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -6.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {}) triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl.load(in_out_ptr0 + (x0), None) tmp3 = tl.load(in_ptr1 + (x0), None) tmp2 = tmp0 - tmp1 tmp4 = tmp1 * tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-10 tmp9 = tmp7 + tmp8 tmp10 = tmp2 / tmp9 tmp11 = -6.0 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = 6.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(in_out_ptr0 + (x0), tmp14, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 96, 96), (36864, 9216, 96, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 96, 96), (36864, 9216, 96, 1), torch.float32) # Topologically Sorted Source Nodes: [pad, mul, pad_1], Original ATen: [aten.reflection_pad2d, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0.run(arg0_1, buf0, buf3, 147456, grid=grid(147456), stream=stream0) # Topologically Sorted Source Nodes: [pad, mean], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d] buf1 = torch.ops.aten.avg_pool2d.default(buf0, [33, 33], [1, 1], [0, 0], False, True, None) del buf0 buf2 = buf1 del buf1 # Topologically Sorted Source Nodes: [mul, pad_1, avg_pool2d_1], Original ATen: [aten.mul, aten.reflection_pad2d, aten.avg_pool2d] buf4 = torch.ops.aten.avg_pool2d.default(buf3, [33, 33], [1, 1], [0, 0], False, True, None) del buf3 buf5 = buf4 del buf4 buf6 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [sub, mul_1, sub_1, abs_1, sqrt, add, truediv, clamp], Original ATen: [aten.sub, aten.mul, aten.abs, aten.sqrt, aten.add, aten.div, aten.clamp] triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1.run(buf6, arg0_1, buf5, 65536, grid=grid(65536), stream=stream0) del arg0_1 del buf5 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
import torch.nn


class LocalNorm2d(nn.Module):

    def __init__(self, kernel_size=33):
        super(LocalNorm2d, self).__init__()
        self.ks = kernel_size
        self.pool = nn.AvgPool2d(kernel_size=self.ks, stride=1, padding=0)
        self.eps = 1e-10

    def forward(self, x):
        pd = int(self.ks / 2)
        mean = self.pool(F.pad(x, (pd, pd, pd, pd), 'reflect'))
        return torch.clamp((x - mean) / (torch.sqrt(torch.abs(self.pool(
            F.pad(x * x, (pd, pd, pd, pd), 'reflect')) - mean * mean)) +
            self.eps), min=-6.0, max=6.0)


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.init import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 96 x1 = xindex // 96 % 96 x2 = xindex // 9216 x3 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-16 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-16 + x1)) + 4096 * x2), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x3, tmp0, None) tl.store(out_ptr1 + x3, tmp1, None) @triton.jit def triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + x0, None) tmp2 = tmp0 - tmp1 tmp4 = tmp1 * tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-10 tmp9 = tmp7 + tmp8 tmp10 = tmp2 / tmp9 tmp11 = -6.0 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = 6.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(in_out_ptr0 + x0, tmp14, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 96, 96), (36864, 9216, 96, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 96, 96), (36864, 9216, 96, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0[grid(147456)](arg0_1, buf0, buf3, 147456, XBLOCK=512, num_warps=8, num_stages=1) buf1 = torch.ops.aten.avg_pool2d.default(buf0, [33, 33], [1, 1], [0, 0], False, True, None) del buf0 buf2 = buf1 del buf1 buf4 = torch.ops.aten.avg_pool2d.default(buf3, [33, 33], [1, 1], [0, 0], False, True, None) del buf3 buf5 = buf4 del buf4 buf6 = buf2 del buf2 triton_poi_fused_abs_add_clamp_div_mul_sqrt_sub_1[grid(65536)](buf6, arg0_1, buf5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del arg0_1 del buf5 return buf6, class LocalNorm2dNew(nn.Module): def __init__(self, kernel_size=33): super(LocalNorm2dNew, self).__init__() self.ks = kernel_size self.pool = nn.AvgPool2d(kernel_size=self.ks, stride=1, padding=0) self.eps = 1e-10 return def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
rdguez-mariano/affnet
LocalNorm2d
false
16320
[ "MIT" ]
211
a3f0bb32d9001d1daf024f38d29867f37816ea78
https://github.com/rdguez-mariano/affnet/tree/a3f0bb32d9001d1daf024f38d29867f37816ea78
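The two fused kernels above implement a windowed z-score: with the mean mu and second moment E[x^2] taken by a 33x33 average pool over reflect-padded input, the output is clamp((x - mu) / (sqrt(|E[x^2] - mu^2|) + eps), -6, 6). A plain-ops re-derivation of the same computation (a sketch using standard torch calls, not the generated code path):

import torch
import torch.nn.functional as F

def local_norm_2d(x, ks=33, eps=1e-10):
    # Local mean and second moment via average pooling over reflect padding;
    # abs() guards against tiny negative variances from rounding.
    pd = ks // 2
    mean = F.avg_pool2d(F.pad(x, (pd, pd, pd, pd), 'reflect'), ks, stride=1)
    sq_mean = F.avg_pool2d(F.pad(x * x, (pd, pd, pd, pd), 'reflect'), ks,
        stride=1)
    std = torch.sqrt(torch.abs(sq_mean - mean * mean))
    return torch.clamp((x - mean) / (std + eps), min=-6.0, max=6.0)

x = torch.rand(4, 4, 64, 64)
print(local_norm_2d(x).shape)  # torch.Size([4, 4, 64, 64])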
HessianResp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3r/c3r7fmtptnxwmckowyj4fuu3isbwcejds77mwjmlle75izelxh2g.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 4 x2 = (xindex // 24) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (x1)) + (x1) * ((x1) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4g/c4g73cvqopsl2ssamaej6vol34yskyzuy2xsypskqd3m4tnbbul6.py # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # pad_1 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad2d_1 = async_compile.triton('triton_poi_fused_replication_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 6 x2 = (xindex // 24) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0))))) + (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) * ((((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (x0)) + (x0) * ((x0) < (3)))), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/n2/cn2d75w37w4pdb44rjpnb26dtohaaykj7ag4tjwuzprf4ihxpt42.py # Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, pow_1, mul_2], Original ATen: [aten.mul, aten.sub, 
aten.abs, aten.pow] # Source node to ATen node mapping: # abs_1 => abs_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %convolution_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, %convolution_3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %pow_1), kwargs = {}) triton_poi_fused_abs_mul_pow_sub_2 = async_compile.triton('triton_poi_fused_abs_mul_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_mul_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_mul_pow_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x3), xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp6 * tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 1, 3), (3, 3, 3, 1)) assert_size_stride(primals_3, (1, 1, 3, 1), (3, 3, 1, 1)) assert_size_stride(primals_4, (1, 1, 1, 3), (3, 3, 3, 1)) 
assert_size_stride(primals_5, (1, 1, 3, 1), (3, 3, 1, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 6), (24, 24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad2d_0.run(primals_1, buf0, 96, grid=grid(96), stream=stream0) # Topologically Sorted Source Nodes: [gxx], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = empty_strided_cuda((4, 1, 6, 4), (24, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.replication_pad2d] triton_poi_fused_replication_pad2d_1.run(primals_1, buf2, 96, grid=grid(96), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [gyy], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1)) buf5 = empty_strided_cuda((4, 1, 6, 4), (24, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_3], Original ATen: [aten.replication_pad2d] triton_poi_fused_replication_pad2d_1.run(buf4, buf5, 96, grid=grid(96), stream=stream0) # Topologically Sorted Source Nodes: [gxy], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, sub, abs_1, pow_1, mul_2], Original ATen: [aten.mul, aten.sub, aten.abs, aten.pow] triton_poi_fused_abs_mul_pow_sub_2.run(buf1, buf3, buf6, primals_6, buf7, 256, grid=grid(256), stream=stream0) return (buf7, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf1, buf2, buf3, buf4, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 1, 3), (3, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 1, 3, 1), (3, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1, 1, 3), (3, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 1, 3, 1), (3, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
import torch.nn


class HessianResp(nn.Module):

    def __init__(self):
        super(HessianResp, self).__init__()
        self.gx = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
        self.gx.weight.data = torch.from_numpy(np.array(
            [[[[0.5, 0, -0.5]]]], dtype=np.float32))
        self.gy = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
        self.gy.weight.data = torch.from_numpy(np.array(
            [[[[0.5], [0], [-0.5]]]], dtype=np.float32))
        self.gxx = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
        self.gxx.weight.data = torch.from_numpy(np.array(
            [[[[1.0, -2.0, 1.0]]]], dtype=np.float32))
        self.gyy = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
        self.gyy.weight.data = torch.from_numpy(np.array(
            [[[[1.0], [-2.0], [1.0]]]], dtype=np.float32))

    def forward(self, x, scale):
        gxx = self.gxx(F.pad(x, (1, 1, 0, 0), 'replicate'))
        gyy = self.gyy(F.pad(x, (0, 0, 1, 1), 'replicate'))
        gxy = self.gy(F.pad(self.gx(F.pad(x, (1, 1, 0, 0), 'replicate')),
            (0, 0, 1, 1), 'replicate'))
        return torch.abs(gxx * gyy - gxy * gxy) * scale ** 4


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.init import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 4 x2 = xindex // 24 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 + (3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * ( 0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 6 x2 = xindex // 24 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= x0) + x0 * (x0 < 3))), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_abs_mul_pow_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr3 + x3, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp6 * tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 1, 3), (3, 3, 3, 1)) assert_size_stride(primals_3, (1, 1, 3, 1), (3, 3, 1, 1)) assert_size_stride(primals_4, (1, 1, 1, 3), (3, 3, 3, 1)) assert_size_stride(primals_5, (1, 1, 3, 1), (3, 3, 1, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 6), (24, 24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(96)](primals_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = empty_strided_cuda((4, 1, 6, 4), (24, 24, 4, 1), torch.float32) triton_poi_fused_replication_pad2d_1[grid(96)](primals_1, buf2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf3 = 
extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1)) buf4 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1)) buf5 = empty_strided_cuda((4, 1, 6, 4), (24, 24, 4, 1), torch.float32) triton_poi_fused_replication_pad2d_1[grid(96)](buf4, buf5, 96, XBLOCK=128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_mul_pow_sub_2[grid(256)](buf1, buf3, buf6, primals_6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return (buf7, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf1, buf2, buf3, buf4, buf5, buf6) class HessianRespNew(nn.Module): def __init__(self): super(HessianRespNew, self).__init__() self.gx = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False) self.gx.weight.data = torch.from_numpy(np.array([[[[0.5, 0, -0.5]]] ], dtype=np.float32)) self.gy = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False) self.gy.weight.data = torch.from_numpy(np.array([[[[0.5], [0], [- 0.5]]]], dtype=np.float32)) self.gxx = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False) self.gxx.weight.data = torch.from_numpy(np.array([[[[1.0, -2.0, 1.0 ]]]], dtype=np.float32)) self.gyy = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False) self.gyy.weight.data = torch.from_numpy(np.array([[[[1.0], [-2.0], [1.0]]]], dtype=np.float32)) return def forward(self, input_0, input_1): primals_2 = self.gx.weight primals_3 = self.gy.weight primals_4 = self.gxx.weight primals_5 = self.gyy.weight primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
rdguez-mariano/affnet
HessianResp
false
16321
[ "MIT" ]
211
a3f0bb32d9001d1daf024f38d29867f37816ea78
https://github.com/rdguez-mariano/affnet/tree/a3f0bb32d9001d1daf024f38d29867f37816ea78
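HessianResp scores blob-like structure with the determinant of the image Hessian: gxx and gyy come from the 1-D stencil [1, -2, 1], gxy from two composed central differences, and the response is |gxx * gyy - gxy^2| * scale^4. A functional sketch of the same computation using only F.conv2d and F.pad (the fixed weights below mirror the ones the module registers):

import torch
import torch.nn.functional as F

def hessian_resp(x, scale):
    kxx = torch.tensor([[[[1.0, -2.0, 1.0]]]])  # d2/dx2 stencil, (1, 1, 1, 3)
    kyy = kxx.reshape(1, 1, 3, 1)               # d2/dy2 stencil
    kx = torch.tensor([[[[0.5, 0.0, -0.5]]]])   # d/dx central difference
    ky = kx.reshape(1, 1, 3, 1)                 # d/dy central difference
    gxx = F.conv2d(F.pad(x, (1, 1, 0, 0), 'replicate'), kxx)
    gyy = F.conv2d(F.pad(x, (0, 0, 1, 1), 'replicate'), kyy)
    gx = F.conv2d(F.pad(x, (1, 1, 0, 0), 'replicate'), kx)
    gxy = F.conv2d(F.pad(gx, (0, 0, 1, 1), 'replicate'), ky)
    return torch.abs(gxx * gyy - gxy * gxy) * scale ** 4

x = torch.rand(4, 1, 4, 4)
scale = torch.rand(4, 4, 4, 4)  # broadcasts against the (4, 1, 4, 4) response
print(hessian_resp(x, scale).shape)  # torch.Size([4, 4, 4, 4])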
Conv2dBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/cs/ccsak2aq2focic3gvi5yypd2u37w22ixutbqzqc6vdjhrk4zppac.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 144, grid=grid(144), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class AdaptiveInstanceNorm2d(nn.Module):

    def __init__(self, num_features, eps=1e-05, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
        b, c = x.size(0), x.size(1)
        running_mean = self.running_mean.repeat(b).type_as(x)
        running_var = self.running_var.repeat(b).type_as(x)
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-05, affine=True, fp16=False):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.fp16 = fp16
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.type() == 'torch.cuda.HalfTensor':
            mean = x.view(-1).float().mean().view(*shape)
            std = x.view(-1).float().std().view(*shape)
            mean = mean.half()
            std = std.half()
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class Conv2dBlock(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, stride,
            padding=0, norm='none', activation='relu', pad_type='zero',
            dilation=1, fp16=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim, fp16=fp16)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if norm == 'sn':
            # NOTE: SpectralNorm is defined elsewhere in the source repo;
            # this branch raises NameError with only this file in scope.
            self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim,
                kernel_size, stride, dilation=dilation, bias=self.use_bias))
        else:
            # NOTE: the padding and pad_type arguments are ignored here;
            # padding is hard-coded to 1.
            self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
                stride, padding=1, dilation=dilation, bias=self.use_bias)

    def forward(self, x):
        x = self.conv(x)
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4,
        'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(144)](buf1, primals_2, buf2, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b).type_as(x) running_var = self.running_var.repeat(b).type_as(x) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. 
weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True, fp16=False): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.fp16 = fp16 if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.type() == 'torch.cuda.HalfTensor': mean = x.view(-1).float().mean().view(*shape) std = x.view(-1).float().std().view(*shape) mean = mean.half() std = std.half() else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class Conv2dBlockNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, norm='none', activation='relu', pad_type='zero', dilation=1, fp16=False): super(Conv2dBlockNew, self).__init__() self.use_bias = True norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim, fp16=fp16) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, dilation=dilation, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=1, dilation=dilation, bias=self.use_bias) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ricklentz/Seg-Uncertainty
Conv2dBlock
false
16322
[ "MIT" ]
298
82fd7056cccb265b3fc3e8a90338866661cab230
https://github.com/ricklentz/Seg-Uncertainty/tree/82fd7056cccb265b3fc3e8a90338866661cab230
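Note on the record above: both the python_code and triton_code fields reference a SpectralNorm wrapper that is never defined in the extracted file, so constructing Conv2dBlock with norm='sn' raises NameError as written; in the source repository the wrapper comes from a sibling module. Below is a minimal stand-in sketch, with the caveat that it substitutes PyTorch's built-in spectral-norm utility for the repository's own class:

import torch.nn as nn
from torch.nn.utils import spectral_norm

def SpectralNorm(module: nn.Module) -> nn.Module:
    # Hypothetical stand-in, not the repository's implementation: wrap the
    # layer with PyTorch's power-iteration spectral norm so the norm='sn'
    # branch of Conv2dBlock is runnable stand-alone.
    return spectral_norm(module)

The compiled call() also makes a constructor quirk visible: padding and pad_type are accepted but the non-spectral branch hardcodes padding=1, so the 4x4x4x4 input convolved with a 4x4 kernel at padding=(1, 1) yields exactly the (4, 4, 3, 3) buffer asserted above.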
Conv2DBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ma/cmayqy264pnaznjghwuvcajrnw2atnmz54epgqujb67ep2gi6kbq.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) % 8 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0))))) + (((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0)))) * ((((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/42/c42i6kggymcvforsoo45syfc6w3ujwnd3pxalcsjxkelshjyz7gv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source 
Nodes: [pad], Original ATen: [aten.replication_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad2d_0.run(primals_3, buf0, 1024, grid=grid(1024), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 400, grid=grid(400), stream=stream0) del primals_2 return (buf2, primals_1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def act_layer(act): if act == 'relu': return nn.ReLU() elif act == 'lrelu': return nn.LeakyReLU(LRELU_SLOPE) elif act == 'elu': return nn.ELU() elif act == 'tanh': return nn.Tanh() elif act == 'prelu': return nn.PReLU() else: raise ValueError('%s not recognized.' % act) def norm_layer2d(norm, channels): if norm == 'batch': return nn.BatchNorm2d(channels) elif norm == 'instance': return nn.InstanceNorm2d(channels, affine=True) elif norm == 'layer': return nn.GroupNorm(1, channels, affine=True) elif norm == 'group': return nn.GroupNorm(4, channels, affine=True) else: raise ValueError('%s not recognized.' % norm) class Conv2DBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_sizes, strides, norm=None, activation=None, padding_mode='replicate'): super(Conv2DBlock, self).__init__() padding = kernel_sizes // 2 if isinstance(kernel_sizes, int) else ( kernel_sizes[0] // 2, kernel_sizes[1] // 2) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_sizes, strides, padding=padding, padding_mode=padding_mode) if activation is None: nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init. calculate_gain('linear')) nn.init.zeros_(self.conv2d.bias) elif activation == 'tanh': nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init. calculate_gain('tanh')) nn.init.zeros_(self.conv2d.bias) elif activation == 'lrelu': nn.init.kaiming_uniform_(self.conv2d.weight, a=LRELU_SLOPE, nonlinearity='leaky_relu') nn.init.zeros_(self.conv2d.bias) elif activation == 'relu': nn.init.kaiming_uniform_(self.conv2d.weight, nonlinearity='relu') nn.init.zeros_(self.conv2d.bias) else: raise ValueError() self.activation = None self.norm = None if norm is not None: self.norm = norm_layer2d(norm, out_channels) if activation is not None: self.activation = act_layer(activation) def forward(self, x): x = self.conv2d(x) x = self.norm(x) if self.norm is not None else x x = self.activation(x) if self.activation is not None else x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_sizes': 4, 'strides': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 8 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0)) + (0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0)) * (0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(1024)](primals_3, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(400)](buf2, primals_2, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, buf0 def act_layer(act): if act == 'relu': return nn.ReLU() elif act == 'lrelu': return nn.LeakyReLU(LRELU_SLOPE) elif act == 'elu': return nn.ELU() elif act == 'tanh': return nn.Tanh() elif act == 'prelu': return nn.PReLU() else: raise ValueError('%s not recognized.' % act) def norm_layer2d(norm, channels): if norm == 'batch': return nn.BatchNorm2d(channels) elif norm == 'instance': return nn.InstanceNorm2d(channels, affine=True) elif norm == 'layer': return nn.GroupNorm(1, channels, affine=True) elif norm == 'group': return nn.GroupNorm(4, channels, affine=True) else: raise ValueError('%s not recognized.' 
% norm) class Conv2DBlockNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_sizes, strides, norm=None, activation=None, padding_mode='replicate'): super(Conv2DBlockNew, self).__init__() padding = kernel_sizes // 2 if isinstance(kernel_sizes, int) else ( kernel_sizes[0] // 2, kernel_sizes[1] // 2) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_sizes, strides, padding=padding, padding_mode=padding_mode) if activation is None: nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init. calculate_gain('linear')) nn.init.zeros_(self.conv2d.bias) elif activation == 'tanh': nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init. calculate_gain('tanh')) nn.init.zeros_(self.conv2d.bias) elif activation == 'lrelu': nn.init.kaiming_uniform_(self.conv2d.weight, a=LRELU_SLOPE, nonlinearity='leaky_relu') nn.init.zeros_(self.conv2d.bias) elif activation == 'relu': nn.init.kaiming_uniform_(self.conv2d.weight, nonlinearity='relu') nn.init.zeros_(self.conv2d.bias) else: raise ValueError() self.activation = None self.norm = None if norm is not None: self.norm = norm_layer2d(norm, out_channels) if activation is not None: self.activation = act_layer(activation) def forward(self, input_0): primals_1 = self.conv2d.weight primals_2 = self.conv2d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
rll-research/ARM
Conv2DBlock
false
16323
[ "BSD-3-Clause" ]
46
7a51e00fabdcdbd8ad2b235266c66115e79deeb0
https://github.com/rll-research/ARM/tree/7a51e00fabdcdbd8ad2b235266c66115e79deeb0
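Both code fields of this record use LRELU_SLOPE without defining it, so act_layer('lrelu') and the activation='lrelu' initialization branch raise NameError when the file runs stand-alone; in the source repository it is a module-level constant. A minimal sketch that makes the record self-contained, where the 0.02 slope is an assumption rather than a value taken from this record, followed by a smoke test mirroring get_inputs()/get_init_inputs():

import torch

LRELU_SLOPE = 0.02  # assumed slope, only needed by the 'lrelu' paths

# call() pins every buffer to cuda:0, so a CUDA device is assumed here.
block = Conv2DBlockNew(in_channels=4, out_channels=4, kernel_sizes=4,
                       strides=1).cuda()
out = block(torch.rand(4, 4, 4, 4, device='cuda'))
# Replication padding of kernel_sizes // 2 = 2 grows the input to 8x8 and the
# 4x4 valid convolution reduces it to 5x5, matching assert_size_stride(buf1, ...).
assert out.shape == (4, 4, 5, 5)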
ReGLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qk/cqktl6a5kztlpt4hsz4gfob2rxmtfurvnwvwoend2jzbg534exfw.py # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.relu, aten.mul] # Source node to ATen node mapping: # g => relu # x => mul # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %view_3), kwargs = {}) triton_poi_fused_mul_relu_0 = async_compile.triton('triton_poi_fused_mul_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 
tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.relu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_relu_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class ReGLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU( ), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). 
We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class ReGLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU( ), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
robburdon/pytorch_tabular
ReGLU
false
16324
[ "MIT" ]
560
9bf75f22c6e1b3033ad699713e77c283d55f3555
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
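Because ReGLU builds PositionWiseFeedForward with bias1=bias2=bias_gate=False, the wrapper hands call() only three weight matrices: the graph is two bias-free matmuls, the fused max(0, x @ W1.T) * (x @ Wv.T) kernel, and a final matmul, with the eager path's dropout absent from this inference graph. A minimal equivalence check between the eager and compiled modules, as a sketch that assumes a CUDA device and uses eval() so the eager dropout is a no-op:

import torch

eager = ReGLU(d_model=4, d_ff=4).cuda().eval()
compiled = ReGLUNew(d_model=4, d_ff=4).cuda().eval()
compiled.ffn.load_state_dict(eager.ffn.state_dict())  # share the three weights
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))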
SwiGLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qy/cqycxwukc2ovy7fq4yqkhvz77tl2rhtpffsix7oclxznyexrjof4.py # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.silu, aten.mul] # Source node to ATen node mapping: # g => mul, sigmoid # x => mul_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_3), kwargs = {}) triton_poi_fused_mul_silu_0 = async_compile.triton('triton_poi_fused_mul_silu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_silu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_silu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.silu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_silu_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class SwiGLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU( ), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_silu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_silu_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). 
We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class SwiGLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU( ), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
robburdon/pytorch_tabular
SwiGLU
false
16325
[ "MIT" ]
560
9bf75f22c6e1b3033ad699713e77c283d55f3555
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
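This record differs from the ReGLU one only in the gate: the fused kernel computes tmp0 * sigmoid(tmp0), i.e. SiLU, before multiplying by the value projection. A functional restatement of the compiled graph, offered as a sketch that assumes a CUDA device and bias-free projections per the all-False bias flags in SwiGLU's constructor:

import torch
import torch.nn.functional as F

def swiglu_reference(x, w1, wv, w2):
    # Mirrors call() above: each linear layer applies x @ W.T, the gate is
    # SiLU, and dropout does not appear in the inference graph.
    return (F.silu(x @ w1.T) * (x @ wv.T)) @ w2.T

m = SwiGLUNew(d_model=4, d_ff=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
ref = swiglu_reference(x, m.ffn.layer1.weight, m.ffn.linear_v.weight,
                       m.ffn.layer2.weight)
torch.testing.assert_close(m(x), ref)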
SA_block_def
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rs/crsgat4ptw2dtljg4exisjbqh77ev3urqbzb7jzp2hllhkk7se3j.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = 
call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x3), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ge/cgefolnutrcupniauwzq5a2ztlcizjsxkttjv6awqq2cyukie4sr.py # Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_group_norm_3 = 
async_compile.triton('triton_poi_fused_native_group_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iv/civg53dfo5dr33zzqui36364uf7q2fz4pt4sja5txeihd3r326v3.py # Topologically Sorted Source Nodes: [group_norm, x_2], Original ATen: [aten.native_group_norm, aten.add] # Source node to ATen node mapping: # group_norm => add_1, mul_1 # x_2 => add_2 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_1), kwargs = {}) triton_poi_fused_add_native_group_norm_4 = async_compile.triton('triton_poi_fused_add_native_group_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x3), xmask) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [p], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = empty_strided_cuda((4, 
4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), out=buf6) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [energy_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 4), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf10) buf11 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 8), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), out=buf14) buf15 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [energy_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 12), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [attention_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0), out=buf18) buf19 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0) del buf10 del buf14 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf20, (4, 4, 4), (16, 4, 1)) buf21 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf6 # reuse buf22 = reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_3.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [group_norm, x_2], Original ATen: [aten.native_group_norm, aten.add] triton_poi_fused_add_native_group_norm_4.run(buf20, buf21, buf22, primals_7, primals_8, primals_1, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_8 return (buf23, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf5, buf9, buf13, buf17, buf19, buf20, reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
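Before the eager-mode reference below: the two _softmax kernels in the dump above implement the standard two-pass, numerically stable softmax. A minimal PyTorch sketch of the same computation (illustrative only, not part of the generated record):

import torch

# Sketch of the two-pass softmax from the kernels above (not generated code):
# triton_poi_fused__softmax_0 subtracts the row-wise max and exponentiates
# for numerical stability; triton_poi_fused__softmax_1 divides by the
# row-wise sum, completing a softmax over the last dimension.
energy = torch.rand(4, 4, 4)
shifted = (energy - energy.amax(dim=-1, keepdim=True)).exp()  # kernel _softmax_0
attention = shifted / shifted.sum(dim=-1, keepdim=True)       # kernel _softmax_1
torch.testing.assert_close(attention, torch.softmax(energy, dim=-1))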
import torch import torch.nn as nn class SA_block_def(nn.Module): """Self-Attention block with dot product for point/voxel/pillar context. """ def __init__(self, inplanes, planes, groups=4): super().__init__() self.groups = groups self.t = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.p = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.g = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.z = nn.Conv1d(planes, inplanes, kernel_size=1, stride=1, groups=self.groups, bias=False) self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes) self.softmax = nn.Softmax(dim=-1) def kernel(self, t, p, g, b, c, h): """Return the output after dot product per head Args: t: output of linear value p: output of linear query g: output of linear keys b: batch size c: no of channels h: spatial breadth of feature maps """ proj_query = p.permute(0, 2, 1) proj_key = g energy = torch.bmm(proj_query, proj_key) total_energy = energy attention = self.softmax(total_energy) proj_value = t out = torch.bmm(proj_value, attention.permute(0, 2, 1)) return out def forward(self, x, y): residual = x t = self.t(y) p = self.p(x) g = self.g(y) b, c, h = t.size() if self.groups and self.groups > 1: _c = int(c / self.groups) ts = torch.split(t, split_size_or_sections=_c, dim=1) ps = torch.split(p, split_size_or_sections=_c, dim=1) gs = torch.split(g, split_size_or_sections=_c, dim=1) _t_sequences = [] for i in range(self.groups): _x = self.kernel(ts[i], ps[i], gs[i], b, _c, h) _t_sequences.append(_x) x = torch.cat(_t_sequences, dim=1) else: x = self.kernel(t, p, g, b, c, h) x = self.z(x) x = self.gn(x) + residual return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
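The reference module above can be exercised directly. A minimal smoke test using the shapes from get_inputs()/get_init_inputs() (the test itself is an illustration, not part of the record):

import torch

# Minimal smoke test for the eager SA_block_def above (illustrative only).
# get_init_inputs() gives inplanes=4, planes=4; get_inputs() gives two
# (batch=4, channels=4, length=4) tensors.
block = SA_block_def(inplanes=4, planes=4, groups=4)
x, y = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
out = block(x, y)
assert out.shape == (4, 4, 4)  # the residual add preserves the input shape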
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy ='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy ='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x3, tmp22, xmask) @triton.jit def triton_poi_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 
* x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x3, xmask) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_3, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), out=buf6) buf7 = buf4 del buf4 extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 4), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), 
torch.float32) triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf10) buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 8), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), out=buf14) buf15 = buf12 del buf12 extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 12), out=buf15 ) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0), out=buf18 ) buf19 = buf16 del buf16 triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf14 buf20 = extern_kernels.convolution(buf19, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf20, (4, 4, 4), (16, 4, 1)) buf21 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf6 buf22 = reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf18 triton_poi_fused_native_group_norm_3[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_group_norm_4[grid(64)](buf20, buf21, buf22, primals_7, primals_8, primals_1, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_8 return (buf23, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf5, buf9, buf13, buf17, buf19, buf20, reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 0)) class SA_block_defNew(nn.Module): """Self-Attention block with dot product for point/voxel/pillar context. 
""" def __init__(self, inplanes, planes, groups=4): super().__init__() self.groups = groups self.t = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.p = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.g = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.z = nn.Conv1d(planes, inplanes, kernel_size=1, stride=1, groups=self.groups, bias=False) self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes) self.softmax = nn.Softmax(dim=-1) def kernel(self, t, p, g, b, c, h): """Return the output after dot product per head Args: t: output of linear value p: output of linear query g: output of linear keys b: batch size c: no of channels h: spatial breadth of feature maps """ proj_query = p.permute(0, 2, 1) proj_key = g energy = torch.bmm(proj_query, proj_key) total_energy = energy attention = self.softmax(total_energy) proj_value = t out = torch.bmm(proj_value, attention.permute(0, 2, 1)) return out def forward(self, input_0, input_1): primals_2 = self.t.weight primals_4 = self.p.weight primals_5 = self.g.weight primals_6 = self.z.weight primals_7 = self.gn.weight primals_8 = self.gn.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
repo_name: reinforcementdriving/SA-Det3D
module_name: SA_block_def
synthetic: false
uuid: 16326
licenses: ["MIT"]
stars: 134
sha: 682cbf5a3023bd580632435d1e4e0acb0ae08ab8
repo_link: https://github.com/reinforcementdriving/SA-Det3D/tree/682cbf5a3023bd580632435d1e4e0acb0ae08ab8
entry_point: SA_block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rs/crsgat4ptw2dtljg4exisjbqh77ev3urqbzb7jzp2hllhkk7se3j.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = 
call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x3), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ge/cgefolnutrcupniauwzq5a2ztlcizjsxkttjv6awqq2cyukie4sr.py # Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_group_norm_3 = 
async_compile.triton('triton_poi_fused_native_group_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iv/civg53dfo5dr33zzqui36364uf7q2fz4pt4sja5txeihd3r326v3.py # Topologically Sorted Source Nodes: [group_norm, x_2], Original ATen: [aten.native_group_norm, aten.add] # Source node to ATen node mapping: # group_norm => add_1, mul_1 # x_2 => add_2 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %unsqueeze_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_1), kwargs = {}) triton_poi_fused_add_native_group_norm_4 = async_compile.triton('triton_poi_fused_add_native_group_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x3), xmask) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [p], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source 
Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), out=buf6) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [energy_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 4), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf10) buf11 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 8), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), out=buf14) buf15 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [energy_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 12), out=buf15) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0) buf17 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [attention_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.bmm] 
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0), out=buf18) buf19 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0) del buf10 del buf14 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf20, (4, 4, 4), (16, 4, 1)) buf21 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf6 # reuse buf22 = reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_3.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [group_norm, x_2], Original ATen: [aten.native_group_norm, aten.add] triton_poi_fused_add_native_group_norm_4.run(buf20, buf21, buf22, primals_6, primals_7, primals_1, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_7 return (buf23, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf5, buf9, buf13, buf17, buf19, buf20, reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 12), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 12), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 8), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 8), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 4), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 4), reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
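The pair of group-norm kernels at the end of the dump above splits nn.GroupNorm into a statistics pass (per-group mean and rsqrt(var + 1e-05)) and a fused normalize-affine-residual pass. A plain PyTorch sketch of the same arithmetic (illustrative; with num_groups equal to num_channels here, each group is a single channel, so the statistics reduce over the length dimension only):

import torch
import torch.nn.functional as F

# Sketch of the two group-norm kernels (not generated code).
x, residual = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
weight, bias = torch.rand(4), torch.rand(4)
mean = x.mean(dim=-1, keepdim=True)                               # kernel 3: mean
rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)  # kernel 3: rsqrt
out = (x - mean) * rstd * weight[None, :, None] + bias[None, :, None] + residual  # kernel 4
ref = F.group_norm(x, num_groups=4, weight=weight, bias=bias) + residual
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)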
import torch import torch.nn as nn class SA_block(nn.Module): """Self-Attention block with dot product for point/voxel/pillar context. A part of the code is from MLCVNet (CVPR 2020). """ def __init__(self, inplanes, planes, groups=4): super().__init__() self.groups = groups self.t = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.p = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.g = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias= False) self.z = nn.Conv1d(planes, inplanes, kernel_size=1, stride=1, groups=self.groups, bias=False) self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes) self.softmax = nn.Softmax(dim=-1) def kernel(self, t, p, g, b, c, h): """Return the output after dot product per head Args: t: output of linear value p: output of linear query g: output of linear keys b: batch size c: no of channels h: spatial breadth of feature maps """ proj_query = p.view(b, c, h).permute(0, 2, 1) proj_key = g energy = torch.bmm(proj_query, proj_key) total_energy = energy attention = self.softmax(total_energy) proj_value = t out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(b, c, h) return out def forward(self, x): residual = x t = self.t(x) p = self.p(x) g = self.g(x) b, c, h = t.size() if self.groups and self.groups > 1: _c = int(c / self.groups) ts = torch.split(t, split_size_or_sections=_c, dim=1) ps = torch.split(p, split_size_or_sections=_c, dim=1) gs = torch.split(g, split_size_or_sections=_c, dim=1) _t_sequences = [] for i in range(self.groups): _x = self.kernel(ts[i], ps[i], gs[i], b, _c, h) _t_sequences.append(_x) x = torch.cat(_t_sequences, dim=1) else: x = self.kernel(t, p, g, b, c, h) x = self.z(x) x = self.gn(x) + residual return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
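A short smoke test for the single-input variant above, also showing the per-head channel split that the four offset bmm calls (offsets 0, 4, 8, 12) in the compiled graph correspond to (illustrative, not part of the record):

import torch

# Smoke test for the eager SA_block; with planes=4 and groups=4 each head
# gets a single-channel (4, 1, 4) slice of t, p and g.
block = SA_block(inplanes=4, planes=4, groups=4)
x = torch.rand(4, 4, 4)
assert block(x).shape == x.shape  # residual add preserves shape
heads = torch.split(block.t(x), split_size_or_sections=1, dim=1)
assert len(heads) == 4 and heads[0].shape == (4, 1, 4)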
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 4, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x3, tmp22, xmask)


@triton.jit
def triton_poi_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_add_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex // 4
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr5 + x3, xmask)
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
        buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
        buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 0),
            reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), out=buf6)
        buf7 = buf4
        del buf4
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 4),
            reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 4), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = buf7
        del buf7
        triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 4),
            reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf10)
        buf11 = buf8
        del buf8
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 8),
            reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 8), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf13 = buf11
        del buf11
        triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf14 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 8),
            reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0), out=buf14)
        buf15 = buf12
        del buf12
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 1), (16, 1, 4), 12),
            reinterpret_tensor(buf2, (4, 1, 4), (16, 4, 1), 12), out=buf15)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf17 = buf15
        del buf15
        triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (16, 4, 1), 12),
            reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0), out=buf18)
        buf19 = buf16
        del buf16
        triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf10
        del buf14
        buf20 = extern_kernels.convolution(buf19, primals_5, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=4, bias=None)
        assert_size_stride(buf20, (4, 4, 4), (16, 4, 1))
        buf21 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf6
        buf22 = reinterpret_tensor(buf18, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf18
        triton_poi_fused_native_group_norm_3[grid(16)](buf20, buf21, buf22,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_group_norm_4[grid(64)](buf20, buf21,
            buf22, primals_6, primals_7, primals_1, buf23, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf21
        del buf22
        del primals_7
    return (buf23, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_6, buf5, buf9, buf13, buf17, buf19, buf20,
        reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 12),
        reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 12),
        reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 12),
        reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 8),
        reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 8),
        reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 8),
        reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 4),
        reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 4),
        reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 4),
        reinterpret_tensor(buf0, (4, 4, 1), (16, 1, 4), 0),
        reinterpret_tensor(buf1, (4, 1, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf2, (4, 4, 1), (16, 1, 4), 0))


class SA_blockNew(nn.Module):
    """Self-Attention block with dot product for point/voxel/pillar context.

    A part of the code is from MLCVNet (CVPR 2020).
    """

    def __init__(self, inplanes, planes, groups=4):
        super().__init__()
        self.groups = groups
        self.t = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.p = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.g = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.z = nn.Conv1d(planes, inplanes, kernel_size=1, stride=1,
            groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
        self.softmax = nn.Softmax(dim=-1)

    def kernel(self, t, p, g, b, c, h):
        """Return the per-head output of the dot-product attention.

        Args:
            t: output of the linear projection for values
            p: output of the linear projection for queries
            g: output of the linear projection for keys
            b: batch size
            c: number of channels
            h: spatial breadth of the feature maps
        """
        proj_query = p.view(b, c, h).permute(0, 2, 1)
        proj_key = g
        energy = torch.bmm(proj_query, proj_key)
        total_energy = energy
        attention = self.softmax(total_energy)
        proj_value = t
        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(b, c, h)
        return out

    def forward(self, input_0):
        primals_2 = self.t.weight
        primals_3 = self.p.weight
        primals_4 = self.g.weight
        primals_5 = self.z.weight
        primals_6 = self.gn.weight
        primals_7 = self.gn.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
reinforcementdriving/SA-Det3D
SA_block
false
16,327
[ "MIT" ]
134
682cbf5a3023bd580632435d1e4e0acb0ae08ab8
https://github.com/reinforcementdriving/SA-Det3D/tree/682cbf5a3023bd580632435d1e4e0acb0ae08ab8
Conv3DBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lz/clzox63go7lejkefprlsxdy6wx6glwzkvxyrrxmfrvxysw37q77d.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad3d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1, _unsafe_index_2 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, %clamp_max, None, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_1, [None, None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad3d_0 = async_compile.triton('triton_poi_fused_replication_pad3d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad3d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad3d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) % 8 x2 = (xindex // 64) % 8 x3 = (xindex // 512) x4 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0))))) + (((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0)))) * ((((0) * ((0) >= ((-2) + x1)) + ((-2) + x1) * (((-2) + x1) > (0)))) < (3)))) + (16*((3) * ((3) <= (((0) * ((0) >= ((-2) + x2)) + ((-2) + x2) * (((-2) + x2) > (0))))) + (((0) * ((0) >= ((-2) + x2)) + ((-2) + x2) * (((-2) + x2) > (0)))) * ((((0) * ((0) >= ((-2) + x2)) + ((-2) + x2) * (((-2) + x2) > (0)))) < (3)))) + (64*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mh/cmhjvcw7vp2m7zyf4g2rdf2dnwmqydkfy7olzilajacbiw7vmsgu.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 500 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 125) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, 
xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.replication_pad3d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad3d_0.run(primals_3, buf0, 2048, grid=grid(2048), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8, 8, 8), (0, 512, 64, 8, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 5, 5, 5), (500, 125, 25, 5, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 500, grid=grid(500), stream=stream0) del primals_2 return (reinterpret_tensor(buf2, (4, 5, 5, 5), (125, 25, 5, 1), 0), primals_1, reinterpret_tensor(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from typing import Union

# LRELU_SLOPE is referenced below but was missing from this snippet; 0.02 is
# the value used elsewhere in the source repository (assumed here).
LRELU_SLOPE = 0.02


def act_layer(act):
    if act == 'relu':
        return nn.ReLU()
    elif act == 'lrelu':
        return nn.LeakyReLU(LRELU_SLOPE)
    elif act == 'elu':
        return nn.ELU()
    elif act == 'tanh':
        return nn.Tanh()
    elif act == 'prelu':
        return nn.PReLU()
    else:
        raise ValueError('%s not recognized.' % act)


class Conv3DBlock(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_sizes:
            'Union[int, list]', strides, norm=None, activation=None,
            padding_mode='replicate', padding=None):
        super(Conv3DBlock, self).__init__()
        padding = kernel_sizes // 2 if padding is None else padding
        self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_sizes,
            strides, padding=padding, padding_mode=padding_mode)
        if activation is None:
            nn.init.xavier_uniform_(self.conv3d.weight,
                gain=nn.init.calculate_gain('linear'))
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'tanh':
            nn.init.xavier_uniform_(self.conv3d.weight,
                gain=nn.init.calculate_gain('tanh'))
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'lrelu':
            nn.init.kaiming_uniform_(self.conv3d.weight, a=LRELU_SLOPE,
                nonlinearity='leaky_relu')
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'relu':
            nn.init.kaiming_uniform_(self.conv3d.weight, nonlinearity='relu')
            nn.init.zeros_(self.conv3d.bias)
        else:
            raise ValueError()
        self.activation = None
        self.norm = None
        if norm is not None:
            # norm_layer3d is defined elsewhere in the source repository and
            # is only reached when a norm is requested (norm is None here).
            self.norm = norm_layer3d(norm, out_channels)
        if activation is not None:
            self.activation = act_layer(activation)

    def forward(self, x):
        x = self.conv3d(x)
        x = self.norm(x) if self.norm is not None else x
        x = self.activation(x) if self.activation is not None else x
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_sizes': 4,
        'strides': 1}]
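# --- Illustrative usage sketch (not part of the original source) ---
# The get_init_inputs()/get_inputs() pair is the test-harness convention used
# throughout this dump; eager execution works on CPU. Recent PyTorch versions
# accept the unbatched (C, D, H, W) input; with kernel_sizes=4 the derived
# padding is 4 // 2 = 2, so each spatial dim goes 4 + 2*2 - 4 + 1 = 5.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    block = Conv3DBlock(*init_args, **init_kwargs)
    x, = get_inputs()
    assert block(x).shape == (4, 5, 5, 5)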
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from typing import Union
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

# LRELU_SLOPE is referenced below but was missing from this snippet; 0.02 is
# the value used elsewhere in the source repository (assumed here).
LRELU_SLOPE = 0.02


@triton.jit
def triton_poi_fused_replication_pad3d_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 8
    x1 = xindex // 8 % 8
    x2 = xindex // 64 % 8
    x3 = xindex // 512
    x4 = xindex
    # Clamped gather that implements replication padding of the 4x4x4 input
    # into the 8x8x8 output.
    tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -2 + x1) + (-2 + x1) *
        (-2 + x1 > 0)) + (0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0)) *
        (0 * (0 >= -2 + x1) + (-2 + x1) * (-2 + x1 > 0) < 3)) + 16 * (3 * (
        3 <= 0 * (0 >= -2 + x2) + (-2 + x2) * (-2 + x2 > 0)) + (0 * (0 >= -
        2 + x2) + (-2 + x2) * (-2 + x2 > 0)) * (0 * (0 >= -2 + x2) + (-2 +
        x2) * (-2 + x2 > 0) < 3)) + 64 * x3 + (3 * (3 <= 0 * (0 >= -2 + x0) +
        (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 +
        x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))),
        None, eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp0, None)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 500
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 125
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_replication_pad3d_0[grid(2048)](primals_3, buf0,
            2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4,
            8, 8, 8), (0, 512, 64, 8, 1), 0), primals_1, stride=(1, 1, 1),
            padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (1, 4, 5, 5, 5), (500, 125, 25, 5, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(500)](buf2, primals_2, 500,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf2, (4, 5, 5, 5), (125, 25, 5, 1), 0
        ), primals_1, reinterpret_tensor(buf0, (1, 4, 8, 8, 8), (2048, 512,
        64, 8, 1), 0)


def act_layer(act):
    if act == 'relu':
        return nn.ReLU()
    elif act == 'lrelu':
        return nn.LeakyReLU(LRELU_SLOPE)
    elif act == 'elu':
        return nn.ELU()
    elif act == 'tanh':
        return nn.Tanh()
    elif act == 'prelu':
        return nn.PReLU()
    else:
        raise ValueError('%s not recognized.' % act)


class Conv3DBlockNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_sizes:
            'Union[int, list]', strides, norm=None, activation=None,
            padding_mode='replicate', padding=None):
        super(Conv3DBlockNew, self).__init__()
        padding = kernel_sizes // 2 if padding is None else padding
        self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_sizes,
            strides, padding=padding, padding_mode=padding_mode)
        if activation is None:
            nn.init.xavier_uniform_(self.conv3d.weight,
                gain=nn.init.calculate_gain('linear'))
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'tanh':
            nn.init.xavier_uniform_(self.conv3d.weight,
                gain=nn.init.calculate_gain('tanh'))
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'lrelu':
            nn.init.kaiming_uniform_(self.conv3d.weight, a=LRELU_SLOPE,
                nonlinearity='leaky_relu')
            nn.init.zeros_(self.conv3d.bias)
        elif activation == 'relu':
            nn.init.kaiming_uniform_(self.conv3d.weight, nonlinearity='relu')
            nn.init.zeros_(self.conv3d.bias)
        else:
            raise ValueError()
        self.activation = None
        self.norm = None
        if norm is not None:
            # norm_layer3d is defined elsewhere in the source repository and
            # is only reached when a norm is requested.
            self.norm = norm_layer3d(norm, out_channels)
        if activation is not None:
            self.activation = act_layer(activation)

    def forward(self, input_0):
        primals_1 = self.conv3d.weight
        primals_2 = self.conv3d.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
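# --- Illustrative usage sketch (not part of the original source) ---
# Smoke test, assuming a CUDA device. assert_size_stride in call() pins the
# input to the unbatched (4, 4, 4, 4) shape used at trace time; replicate
# padding by 2 takes each spatial dim to 8, and the stride-1 k=4 convolution
# brings it to 8 - 4 + 1 = 5.
if __name__ == '__main__':
    block = Conv3DBlockNew(in_channels=4, out_channels=4, kernel_sizes=4,
        strides=1).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert block(x).shape == (4, 5, 5, 5)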
rll-research/ARM
Conv3DBlock
false
16,328
[ "BSD-3-Clause" ]
46
7a51e00fabdcdbd8ad2b235266c66115e79deeb0
https://github.com/rll-research/ARM/tree/7a51e00fabdcdbd8ad2b235266c66115e79deeb0
rpn_head
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tr/ctrjfdxh4hxjgnhgsb6j7blrx7nmmgvcuevebukqbiklmkeum6zf.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1048576 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = (yindex // 1024) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (1024*x2) 
+ (9216*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yr/cyrzslz775i7ihcuhg6kfta5526phnjqf73cvra3dvya5zlnookc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = (yindex // 1024) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None) tl.store(out_ptr0 + (y0 + (1024*x2) + (4194304*y1)), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5j/c5jdurevfd2gyhy6hcfrbgna2e5rcgdks5xq2g6k7nysq6xm7nox.py # Topologically Sorted Source Nodes: [conv2d, conv_rpn], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # conv_rpn => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16777216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6q/c6qzye5kg5nh4ufvf2g5vd7nttlp6gnyts4gisx5fybgydstai2b.py # Topologically Sorted Source Nodes: [conv2d_1, rpn_cls_prob], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # rpn_cls_prob => sigmoid # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_sigmoid_3 = async_compile.triton('triton_poi_fused_convolution_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 60 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 15 y1 = (yindex // 15) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (15*x2) + (61440*y1)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + (4096*y3)), tmp3, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/l6/cl6wxjwbtwpfi5umpxnohcw4wdnigh7p3bxmfzpmywqmofmkgssl.py # Topologically Sorted Source Nodes: [rpn_bbox_pred], Original ATen: [aten.convolution] # Source node to ATen node mapping: # rpn_bbox_pred => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 240 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 60 y1 = (yindex // 60) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (60*x2) + (245760*y1)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, 
ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_2, (1024, ), (1, )) assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_4, (15, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_5, (15, ), (1, )) assert_size_stride(primals_6, (60, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_7, (60, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 4096, 4096, grid=grid(4096, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1024, 64, 64), (4194304, 1, 65536, 1024)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d, conv_rpn], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf3, primals_2, 16777216, grid=grid(16777216), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 15, 64, 64), (61440, 1, 960, 15)) buf5 = empty_strided_cuda((4, 15, 64, 64), (61440, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, rpn_cls_prob], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_3.run(buf4, primals_5, buf5, 60, 4096, grid=grid(60, 4096), stream=stream0) del buf4 del primals_5 # Topologically Sorted Source Nodes: [rpn_bbox_pred], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 60, 64, 64), (245760, 1, 3840, 60)) buf7 = empty_strided_cuda((4, 60, 64, 64), (245760, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [rpn_bbox_pred], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf6, primals_7, buf7, 240, 4096, grid=grid(240, 4096), stream=stream0) del buf6 del primals_7 return (buf5, buf7, buf0, buf1, primals_4, primals_6, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((15, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) primals_5 = rand_strided((15, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((60, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class rpn_head(torch.nn.Module):

    def __init__(self, in_channels=1024, out_channels=1024, n_anchors=15):
        super(rpn_head, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.sigmoid = torch.nn.Sigmoid()
        self.conv_rpn = torch.nn.Conv2d(in_channels, out_channels, 3,
            stride=1, padding=1)
        self.rpn_cls_prob = torch.nn.Conv2d(out_channels, n_anchors, 1,
            stride=1, padding=0)
        self.rpn_bbox_pred = torch.nn.Conv2d(out_channels, 4 * n_anchors,
            1, stride=1, padding=0)

    def forward(self, x):
        conv_rpn = self.relu(self.conv_rpn(x))
        rpn_cls_prob = self.sigmoid(self.rpn_cls_prob(conv_rpn))
        rpn_bbox_pred = self.rpn_bbox_pred(conv_rpn)
        return rpn_cls_prob, rpn_bbox_pred


def get_inputs():
    return [torch.rand([4, 1024, 64, 64])]


def get_init_inputs():
    return [[], {}]
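# --- Illustrative usage sketch (not part of the original source) ---
# With n_anchors=15 the head emits 15 objectness maps and 4 * 15 = 60
# box-delta channels. The (dx, dy, dw, dh) grouping below is a common RPN
# convention, assumed here rather than taken from this repository.
if __name__ == '__main__':
    head = rpn_head()
    feat = torch.rand(1, 1024, 64, 64)
    cls_prob, bbox_pred = head(feat)
    assert cls_prob.shape == (1, 15, 64, 64)
    deltas = bbox_pred.view(1, 15, 4, 64, 64)  # one 4-vector of deltas per anchor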
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 1024
    y1 = yindex // 1024
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 1024
    y1 = yindex // 1024
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None)
    tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_3(in_ptr0, in_ptr1, out_ptr0,
        ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 60
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 15
    y1 = yindex // 15
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 15 * x2 + 61440 * y1), ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)


@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
        xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 240
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 60
    y1 = yindex // 60
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 60 * x2 + 245760 * y1), ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (1024, 1024, 3, 3), (9216, 9, 3, 1))
    assert_size_stride(primals_2, (1024,), (1,))
    assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
    assert_size_stride(primals_4, (15, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_5, (15,), (1,))
    assert_size_stride(primals_6, (60, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_7, (60,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(1048576, 9)](primals_1, buf0, 1048576, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536,
            1024), torch.float32)
        triton_poi_fused_1[grid(4096, 4096)](primals_3, buf1, 4096, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 1024, 64, 64), (4194304, 1, 65536, 1024))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_2[grid(16777216)](buf3,
            primals_2, 16777216, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 15, 64, 64), (61440, 1, 960, 15))
        buf5 = empty_strided_cuda((4, 15, 64, 64), (61440, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_convolution_sigmoid_3[grid(60, 4096)](buf4,
            primals_5, buf5, 60, 4096, XBLOCK=64, YBLOCK=64, num_warps=8,
            num_stages=1)
        del buf4
        del primals_5
        buf6 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 60, 64, 64), (245760, 1, 3840, 60))
        buf7 = empty_strided_cuda((4, 60, 64, 64), (245760, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_convolution_4[grid(240, 4096)](buf6, primals_7,
            buf7, 240, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
        del buf6
        del primals_7
    return buf5, buf7, buf0, buf1, primals_4, primals_6, buf3, buf5


class rpn_headNew(torch.nn.Module):

    def __init__(self, in_channels=1024, out_channels=1024, n_anchors=15):
        super(rpn_headNew, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.sigmoid = torch.nn.Sigmoid()
        self.conv_rpn = torch.nn.Conv2d(in_channels, out_channels, 3,
            stride=1, padding=1)
        self.rpn_cls_prob = torch.nn.Conv2d(out_channels, n_anchors, 1,
            stride=1, padding=0)
        self.rpn_bbox_pred = torch.nn.Conv2d(out_channels, 4 * n_anchors,
            1, stride=1, padding=0)

    def forward(self, input_0):
        primals_1 = self.conv_rpn.weight
        primals_2 = self.conv_rpn.bias
        primals_4 = self.rpn_cls_prob.weight
        primals_5 = self.rpn_cls_prob.bias
        primals_6 = self.rpn_bbox_pred.weight
        primals_7 = self.rpn_bbox_pred.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
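# --- Illustrative usage sketch (not part of the original source) ---
# Smoke test for the compiled head, assuming a CUDA device with enough memory
# for the (4, 1024, 64, 64) feature map hard-coded into call(). The wrapper
# returns (cls_prob, bbox_pred).
if __name__ == '__main__':
    head = rpn_headNew().cuda()
    feat = torch.rand(4, 1024, 64, 64, device='cuda')
    cls_prob, bbox_pred = head(feat)
    assert cls_prob.shape == (4, 15, 64, 64)
    assert bbox_pred.shape == (4, 60, 64, 64)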
peckjon/detectorch
rpn_head
false
16,329
[ "Apache-2.0" ]
627
69d31250d79a72b12b7419638ef59163f833bbba
https://github.com/peckjon/detectorch/tree/69d31250d79a72b12b7419638ef59163f833bbba
AttentionLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hm/chmzhz6v4tirfzpa5ycdrznhmrdfnzid5axw4woohnq3wcsf3vqu.py # Topologically Sorted Source Nodes: [num_pos, sub, num_neg, add, alpha, sub_1, pow_1, edge_beta, mul, log, mul_1, mul_2, loss, sub_3, pow_3, bg_beta, mul_3, sub_4, log_1, mul_4, sub_5, mul_5, loss_1, mean], Original ATen: [aten.sum, aten.rsub, aten.add, aten.div, aten.pow, aten.mul, aten.log, aten.sub, aten.mean] # Source node to ATen node mapping: # add => add # alpha => div # bg_beta => pow_4 # edge_beta => pow_2 # log => log # log_1 => log_1 # loss => sub_2 # loss_1 => sub_6 # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # num_neg => sum_2 # num_pos => sum_1 # pow_1 => pow_1 # pow_3 => pow_3 # sub => sub # sub_1 => sub_1 # sub_3 => sub_3 # sub_4 => sub_4 # sub_5 => sub_5 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 0.5), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Scalar](args = (4, %pow_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_2), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %arg0_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, 
%mul_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 0.5), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Scalar](args = (4, %pow_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %pow_4), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_4,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %log_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %sub_5), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %mul_5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_6,), kwargs = {}) triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp11 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = 
triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tmp3 + tmp8 tmp10 = tmp8 / tmp9 tmp12 = tmp4 - tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 4.0 tmp15 = libdevice.pow(tmp14, tmp13) tmp16 = tmp10 * tmp15 tmp17 = tl_math.log(tmp11) tmp18 = tmp16 * tmp17 tmp19 = tmp18 * tmp0 tmp20 = 0.0 tmp21 = tmp20 - tmp19 tmp22 = tmp4 - tmp10 tmp23 = libdevice.sqrt(tmp11) tmp24 = libdevice.pow(tmp14, tmp23) tmp25 = tmp22 * tmp24 tmp26 = tl_math.log(tmp12) tmp27 = tmp25 * tmp26 tmp28 = tmp27 * tmp5 tmp29 = tmp21 - tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = 256.0 tmp34 = tmp32 / tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp34, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [num_pos, sub, num_neg, add, alpha, sub_1, pow_1, edge_beta, mul, log, mul_1, mul_2, loss, sub_3, pow_3, bg_beta, mul_3, sub_4, log_1, mul_4, sub_5, mul_5, loss_1, mean], Original ATen: [aten.sum, aten.rsub, aten.add, aten.div, aten.pow, aten.mul, aten.log, aten.sub, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class AttentionLoss(nn.Module): def __init__(self, beta=4, gamma=0.5): super(AttentionLoss, self).__init__() self.beta = beta self.gamma = gamma def forward(self, pred, gt): num_pos = torch.sum(gt) num_neg = torch.sum(1 - gt) alpha = num_neg / (num_pos + num_neg) edge_beta = torch.pow(self.beta, torch.pow(1 - pred, self.gamma)) bg_beta = torch.pow(self.beta, torch.pow(pred, self.gamma)) loss = 0 loss = loss - alpha * edge_beta * torch.log(pred) * gt loss = loss - (1 - alpha) * bg_beta * torch.log(1 - pred) * (1 - gt) return torch.mean(loss) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp11 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 1.0 tmp5 = tmp4 - tmp0 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tmp3 + tmp8 tmp10 = tmp8 / tmp9 tmp12 = tmp4 - tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 4.0 tmp15 = libdevice.pow(tmp14, tmp13) tmp16 = tmp10 * tmp15 tmp17 = tl_math.log(tmp11) tmp18 = tmp16 * tmp17 tmp19 = tmp18 * tmp0 tmp20 = 0.0 tmp21 = tmp20 - tmp19 tmp22 = tmp4 - tmp10 tmp23 = libdevice.sqrt(tmp11) tmp24 = libdevice.pow(tmp14, tmp23) tmp25 = tmp22 * tmp24 tmp26 = tl_math.log(tmp12) tmp27 = tmp25 * tmp26 tmp28 = tmp27 * tmp5 tmp29 = tmp21 - tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = 256.0 tmp34 = tmp32 / tmp33 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp34, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class AttentionLossNew(nn.Module): def __init__(self, beta=4, gamma=0.5): super(AttentionLossNew, self).__init__() self.beta = beta self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
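A minimal parity-check sketch for this record (our own addition, not part of the generated sources). It assumes a CUDA device is available and that the AttentionLoss and AttentionLossNew classes above are in scope; the clamp is also our own choice, keeping pred strictly inside (0, 1) so that both log terms stay finite.

import torch

def _check_attention_loss_parity():
    pred = torch.rand(4, 4, 4, 4, device='cuda').clamp(1e-4, 1 - 1e-4)
    gt = torch.rand(4, 4, 4, 4, device='cuda')
    eager = AttentionLoss()(pred, gt)
    fused = AttentionLossNew()(pred, gt)
    # Both paths compute the same fused reduction, so the scalars should
    # agree up to floating-point reassociation of the sums.
    assert torch.allclose(eager, fused, atol=1e-5), (eager.item(), fused.item())

if __name__ == '__main__' and torch.cuda.is_available():
    _check_attention_loss_parity()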
robtu328/TextBPN
AttentionLoss
false
16,330
[ "MIT" ]
49
225844770e0107817be9fb86d53f873fa3eb07ae
https://github.com/robtu328/TextBPN/tree/225844770e0107817be9fb86d53f873fa3eb07ae
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ii/ciitunocanka4uci3umi7hrzcyuhvksptkdqwrkivu6v426gkycl.py # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # out => mul # x => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %div), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = 
tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init class L2Norm(nn.Module): def __init__(self, n_channels, scale): super(L2Norm, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps x = torch.div(x, norm.expand_as(x)) out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x ) * x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
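A small usage sketch for this record (our own illustration, assuming CUDA and that the L2Norm and L2NormNew classes above are in scope). The fused kernel computes, for every spatial position, the L2 norm over the four channels, divides by it, and scales by the learned per-channel weight, so the two modules should agree to within float rounding.

import torch

def _check_l2norm_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = L2Norm(4, 1.0).cuda()
    fused = L2NormNew(4, 1.0).cuda()
    fused.load_state_dict(eager.state_dict())  # share the per-channel weights
    assert torch.allclose(eager(x), fused(x), atol=1e-6)

if __name__ == '__main__' and torch.cuda.is_available():
    _check_l2norm_parity()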
robtu328/TextDetCorner
L2Norm
false
16,331
[ "Python-2.0", "OLDAP-2.7" ]
331
f37ef0e1d2068c5fbd643855acd21787a2c122c5
https://github.com/robtu328/TextDetCorner/tree/f37ef0e1d2068c5fbd643855acd21787a2c122c5
GEGLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/em/cemg2y45l6deao7jdm2w6eokhhfga7r2ohhlsue6j5tme4ofmwni.py # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul] # Source node to ATen node mapping: # g => add, erf, mul, mul_1, mul_2 # x => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view_3), kwargs = {}) triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, x], Original ATen: [aten.gelu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_gelu_mul_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position-wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of the position-wise feedforward network used in transformers. FFN consists of two fully connected layers. The number of dimensions in the hidden layer, $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometimes also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is the dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specifies whether the first fully connected layer should have a learnable bias * `bias2` specifies whether the second fully connected layer should have a learnable bias * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class GEGLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU( ), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp9 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position-wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of the position-wise feedforward network used in transformers. FFN consists of two fully connected layers. The number of dimensions in the hidden layer, $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometimes also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is the dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specifies whether the first fully connected layer should have a learnable bias * `bias2` specifies whether the second fully connected layer should have a learnable bias * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class GEGLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU( ), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
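A usage sketch (our own addition, assuming CUDA and that the GEGLU and GEGLUNew classes above are in scope). The fused kernel evaluates the exact erf-based GELU of x @ W1^T and multiplies it by the gate x @ V^T before the final projection through layer2. The compiled path omits dropout entirely, so the eager module is put in eval() mode, where dropout is a no-op, to make the comparison meaningful.

import torch

def _check_geglu_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = GEGLU(4, 4).cuda().eval()
    fused = GEGLUNew(4, 4).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # share the three weight matrices
    assert torch.allclose(eager(x), fused(x), atol=1e-5)

if __name__ == '__main__' and torch.cuda.is_available():
    _check_geglu_parity()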
robburdon/pytorch_tabular
GEGLU
false
16,332
[ "MIT" ]
560
9bf75f22c6e1b3033ad699713e77c283d55f3555
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
Mnist_CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wr/cwrjr75ksmgrel4djhr27volr7bzoob6ixyduxm74y2ky6udtlzn.py # Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # xb_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 196) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iz/cizeqrrbvjbeoqr7ojtcvnagh6alsvquwz2qlmnzzbytbq77tnn4.py # Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # xb_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 49) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/23/c237kritlyjcuojdcyxxqqygn46332pp2fgszsmeqpe2chj5uwq2.py # Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # xb_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rr/crrealymt6xnsywuzmxru2w3tcsoludjyewecl5ik2qqtmaogdt2.py # Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # xb_4 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%relu_2, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 12544, grid=grid(12544), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: 
[conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 3136, grid=grid(3136), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 640, grid=grid(640), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_3.run(buf5, buf6, 40, grid=grid(40), stream=stream0) return (reinterpret_tensor(buf6, (4, 10), (10, 1), 0), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Mnist_CNN(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1) self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1) self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1) def forward(self, xb): xb = xb.view(-1, 1, 28, 28) xb = F.relu(self.conv1(xb)) xb = F.relu(self.conv2(xb)) xb = F.relu(self.conv3(xb)) xb = F.avg_pool2d(xb, 4) return xb.view(-1, xb.size(1)) def get_inputs(): return [torch.rand([4, 1, 28, 28])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.onnx import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 49 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(12544)](buf1, primals_3, 12544, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(3136)](buf3, primals_5, 3136, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(640)](buf5, primals_7, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32) 
triton_poi_fused_avg_pool2d_3[grid(40)](buf5, buf6, 40, XBLOCK=64, num_warps=1, num_stages=1) return reinterpret_tensor(buf6, (4, 10), (10, 1), 0 ), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5 class Mnist_CNNNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1) self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1) self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
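A quick shape and parity check (our own sketch, assuming CUDA and that the Mnist_CNN and Mnist_CNNNew classes above are in scope). The three stride-2 convolutions take the 28x28 input down to 4x4, the 4x4 average pool collapses it to 1x1, and the final view yields a (batch, 10) logit tensor.

import torch

def _check_mnist_cnn_parity():
    xb = torch.rand(4, 1, 28, 28, device='cuda')
    eager = Mnist_CNN().cuda()
    fused = Mnist_CNNNew().cuda()
    fused.load_state_dict(eager.state_dict())  # share all conv weights and biases
    out = fused(xb)
    assert out.shape == (4, 10)
    assert torch.allclose(eager(xb), out, atol=1e-5)

if __name__ == '__main__' and torch.cuda.is_available():
    _check_mnist_cnn_parity()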
rgommers/tutorials
Mnist_CNN
false
16,333
[ "BSD-3-Clause" ]
6,424
9341570d4d8ed2c77371eac3b8520f7038d731ee
https://github.com/rgommers/tutorials/tree/9341570d4d8ed2c77371eac3b8520f7038d731ee
CoxPHLossSorted
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5s/c5shsgehfk5krlitzfx6dsoni2ot3vtyhzwhw5d6md32oxwp3feg.py # Topologically Sorted Source Nodes: [gamma, sub, exp, cumsum, add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.max, aten.sub, aten.exp, aten.cumsum, aten.add, aten.log, aten.mul, aten.sum, aten.div, aten.neg] # Source node to ATen node mapping: # add => add # cumsum => cumsum # div => div # exp => exp # gamma => max_1 # log => log # log_cumsum_h => add_1 # mul => mul # neg => neg # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%view_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %max_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%exp, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum, 1e-07), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %max_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp14 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5.to(tl.float32) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp8, = tl.associative_scan((tmp7,), 0, _triton_helper_fn_add0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = tl.broadcast_to(tmp14, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tmp18 / tmp21 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [gamma, sub, exp, cumsum, add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.max, aten.sub, aten.exp, aten.cumsum, aten.add, aten.log, aten.mul, aten.sum, aten.div, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0.run(buf4, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf4, ) def benchmark_compiled_module(times=10, 
repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
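The benchmark harness above builds CUDA inputs with rand_strided and times call() via print_performance. A minimal hedged sketch of invoking the compiled artifact directly (assumes a CUDA device and that the call function from the listing above is in scope; variable names are ours):

import torch
from torch._dynamo.testing import rand_strided

# inputs must match the asserted sizes/strides: (4, 4, 4, 4) contiguous float32
log_h = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
events = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
loss, = call([log_h, events])  # call() empties the argument list in place
print(loss)                    # 0-dim float32 tensor on cuda:0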
import torch from torch import Tensor def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) class CoxPHLossSorted(torch.nn.Module): """Loss for CoxPH. Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ def __init__(self): super().__init__() def forward(self, log_h: 'Tensor', events: 'Tensor') ->Tensor: return cox_ph_loss_sorted(log_h, events) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp14 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5.to(tl.float32) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp8, = tl.associative_scan((tmp7,), 0, _triton_helper_fn_add0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = tl.broadcast_to(tmp14, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tmp18 / tmp21 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0[grid(1)]( buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) class CoxPHLossSortedNew(torch.nn.Module): """Loss for CoxPH. Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
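Note how the standalone kernel above fuses the whole loss (max, sub, exp, cumsum, add, log, mul, and both sums) into one persistent reduction, implementing the cumulative sum with tl.associative_scan and a @triton.jit add combiner. A minimal self-contained sketch of that scan pattern (hypothetical kernel and helper names; assumes a CUDA device with Triton installed):

import torch
import triton
import triton.language as tl

@triton.jit
def _add(a, b):
    # combine function for the scan; must itself be @triton.jit-compiled
    return a + b

@triton.jit
def cumsum_kernel(x_ptr, out_ptr, n, BLOCK: tl.constexpr):
    offs = tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask, other=0.0)
    y, = tl.associative_scan((x,), 0, _add)  # inclusive prefix sum along axis 0
    tl.store(out_ptr + offs, y, mask=mask)

x = torch.rand(256, device='cuda')
out = torch.empty_like(x)
cumsum_kernel[(1,)](x, out, x.numel(), BLOCK=256)
assert torch.allclose(out, x.cumsum(0), atol=1e-5)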
rohanshad/pycox
CoxPHLossSorted
false
16334
[ "BSD-2-Clause" ]
449
5483489d21f3441e53f78f9f8898ce607f41c632
https://github.com/rohanshad/pycox/tree/5483489d21f3441e53f78f9f8898ce607f41c632
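The log_cumsum_h line in cox_ph_loss_sorted above is a numerically stable log-cumsum-exp: shifting by the global max gamma before exponentiating prevents float32 overflow, and eps guards the log. A minimal illustration in pure PyTorch (the helper name is ours; expected outputs shown as comments):

import torch

def log_cumsum_exp(log_h, eps=1e-07):
    # same stabilization as cox_ph_loss_sorted: shift, exp, cumsum, log, unshift
    gamma = log_h.max()
    return log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma)

log_h = torch.tensor([100.0, 99.0, 98.0])  # exp(100) overflows float32
naive = log_h.exp().cumsum(0).log()
stable = log_cumsum_exp(log_h)
print(naive)   # tensor([inf, inf, inf])
print(stable)  # finite, approx tensor([100.0000, 100.3133, 100.4076])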
CoxPHLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ti/ctiljzl4dc6aw3rse3i33dz7ntgejsdixtq5q2it3jzqtvyozmu5.py # Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort] # Source node to ATen node mapping: # sort => sort # Graph fragment: # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%arg1_1, -1, True), kwargs = {}) triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), 
xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (r1 + (4*x0)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bu/cbuvn7q5hcxxos2llx7iyucnrqp5rxu3phitjjgtdiexbwfpfsls.py # Topologically Sorted Source Nodes: [gamma], Original ATen: [aten.max] # Source node to ATen node mapping: # gamma => max_1 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%view_1,), kwargs = {}) triton_red_fused_max_1 = async_compile.triton('triton_red_fused_max_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[2, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp9 = tl.full([XBLOCK, RBLOCK], float("-inf"), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + ((128*x0) + (r1 // 64)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(rmask & xmask), "index out of bounds: 0 <= tmp5 < 4") tmp7 = tl.load(in_ptr1 + ((64*tmp5) + (r1 % 64)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = triton_helpers.maximum(_tmp9, tmp8) _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp9 = triton_helpers.max2(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4s/c4svd7ast6nrnrq3ddyyzhoyhw3iwxlnxt4jkl7u57tggu4fsrqh.py # Topologically Sorted Source Nodes: [gamma], Original ATen: [aten.max] # Source node to ATen node 
mapping: # gamma => max_1 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%view_1,), kwargs = {}) triton_per_fused_max_2 = async_compile.triton('triton_per_fused_max_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gn/cgns6funurqslgwtbqh733vpcpjwcpxezudfgmk2gyx6mbrt5dj4.py # Topologically Sorted Source Nodes: [sub, exp, cumsum], Original ATen: [aten.sub, aten.exp, aten.cumsum] # Source node to ATen node mapping: # cumsum => cumsum # exp => exp # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %max_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%exp, 0), kwargs = {}) triton_spl_fused_cumsum_exp_sub_3 = async_compile.triton('triton_spl_fused_cumsum_exp_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.split_scan( size_hints=[1, 16384], reduction_hint=ReductionHint.INNER, filename=__file__, 
triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i8', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_spl_fused_cumsum_exp_sub_3', 'mutated_arg_names': ['ws_ptr'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_spl_fused_cumsum_exp_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ws_ptr, xnumel, rnumel, RBLOCK : tl.constexpr): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 16384 xoffset = tl.program_id(1) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) roffset = tl.program_id(0) * RBLOCK rindex = roffset + tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + ((r0 // 64)), rmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + (0)) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp12 = tl.num_programs(0) tmp13 = ws_ptr.to(tl.pointer_type(tl.uint64)) + xoffset * 1 * tmp12 tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(rmask), "index out of bounds: 0 <= tmp5 < 4") tmp7 = tl.load(in_ptr1 + ((64*tmp5) + (r0 % 64)), rmask, other=0.0) tmp10 = tmp7 - tmp9 tmp11 = tl_math.exp(tmp10) tmp14 = tmp11.to(tl.float32) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp16 = tl.reduce(tmp15, 0, _triton_helper_fn_add0) tmp17 = triton_helpers.exclusive_scan_decoupled_lookback( tmp13, tmp16, tl.program_id(0), _triton_helper_fn_add0, DTYPE_VALUE_AS_UINT=tl.uint32, DTYPE_PACK=tl.uint64, ) tmp18 = tl.associative_scan(tmp15, 0, _triton_helper_fn_add0) tmp19 = _triton_helper_fn_add0(tmp17, tmp18) tmp20 = tl.where(roffset == 0, tmp18, tmp19) tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp20, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sr/csraps56wurb43vdz2i3p323w5in3o6s6evt3gnybxlxhm7k6x7m.py # Topologically Sorted Source Nodes: [add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # add => add # log => log # log_cumsum_h => add_1 # mul => mul # sub_1 => sub_1 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum, 1e-07), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %max_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {}) # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) triton_red_fused_add_log_mul_sub_sum_4 = async_compile.triton('triton_red_fused_add_log_mul_sub_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[2, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_log_mul_sub_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_log_mul_sub_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp12 = tl.load(in_ptr3 + (0)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) _tmp19 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp22 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + ((128*x0) + (r1 // 64)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(rmask & xmask), "index out of bounds: 0 <= tmp5 < 4") tmp7 = tl.load(in_ptr1 + ((64*tmp5) + (r1 % 64)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp14 = tmp11 + tmp13 tmp15 = tmp7 - tmp14 tmp16 = tl.load(in_ptr4 + ((64*tmp5) + (r1 % 64)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = _tmp19 + tmp18 _tmp19 = tl.where(rmask & xmask, tmp20, _tmp19) tmp21 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp23 = _tmp22 + tmp21 _tmp22 = tl.where(rmask & xmask, tmp23, _tmp22) tmp19 = tl.sum(_tmp19, 1)[:, None] tl.store(out_ptr0 + (x0), tmp19, xmask) tmp22 = 
tl.sum(_tmp22, 1)[:, None] tl.store(out_ptr1 + (x0), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ws/cws2kvw5nbabxumdolhguy43oogrbdvxc52jldvpi4mb3jfcgkrz.py # Topologically Sorted Source Nodes: [add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.sum, aten.div, aten.neg] # Source node to ATen node mapping: # add => add # div => div # log => log # log_cumsum_h => add_1 # mul => mul # neg => neg # sub_1 => sub_1 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum, 1e-07), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %max_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) triton_per_fused_add_div_log_mul_neg_sub_sum_5 = async_compile.triton('triton_per_fused_add_div_log_mul_neg_sub_sum_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_neg_sub_sum_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mul_neg_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) 
r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp4 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp3 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) # Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort] stream0 = get_raw_stream(0) triton_per_fused_sort_0.run(arg1_1, buf1, 64, 4, grid=grid(64), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((2, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [gamma], Original ATen: [aten.max] triton_red_fused_max_1.run(buf1, arg0_1, buf2, 2, 8192, grid=grid(2), stream=stream0) buf3 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [gamma], Original ATen: [aten.max] triton_per_fused_max_2.run(buf2, buf3, 1, 2, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((16384, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub, exp, cumsum], Original ATen: [aten.sub, aten.exp, aten.cumsum] workspace = empty_strided_cuda((512, ), (1, ), torch.uint8) workspace.zero_() triton_spl_fused_cumsum_exp_sub_3.run(buf1, arg0_1, buf3, buf4, workspace, 1, 16384, grid=split_scan_grid(1, 16384), stream=stream0) del workspace buf5 = buf2; del buf2 # reuse buf7 = empty_strided_cuda((2, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.sum] triton_red_fused_add_log_mul_sub_sum_4.run(buf1, arg0_1, buf4, buf3, arg2_1, buf5, buf7, 2, 8192, grid=grid(2), stream=stream0) del arg0_1 del arg2_1 del buf1 del buf4 buf6 = buf3; del buf3 # reuse buf9 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.sum, aten.div, aten.neg] triton_per_fused_add_div_log_mul_neg_sub_sum_5.run(buf9, buf5, buf7, 1, 2, grid=grid(1), stream=stream0) del buf5 del buf7 return (buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) def cox_ph_loss(log_h: 'Tensor', durations: 'Tensor', events: 'Tensor', eps: 'float'=1e-07) ->Tensor: """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ idx = durations.sort(descending=True)[1] events = events[idx] log_h = log_h[idx] return cox_ph_loss_sorted(log_h, events, eps) class CoxPHLoss(torch.nn.Module): """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ def forward(self, log_h: 'Tensor', durations: 'Tensor', events: 'Tensor' ) ->Tensor: return cox_ph_loss(log_h, durations, events) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (r1 + 4 * x0), tmp6, xmask) @triton.jit def triton_red_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp9 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (128 * x0 + r1 // 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~(rmask & xmask), 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = triton_helpers.maximum(_tmp9, tmp8) _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp9 = triton_helpers.max2(_tmp9, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_max_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_spl_fused_cumsum_exp_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ws_ptr, xnumel, rnumel, RBLOCK: tl.constexpr): XBLOCK: tl.constexpr = 1 rnumel = 16384 xoffset = tl.program_id(1) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) roffset = tl.program_id(0) * RBLOCK rindex = roffset + tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0 // 64, rmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + 0) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp12 = tl.num_programs(0) tmp13 = ws_ptr.to(tl.pointer_type(tl.uint64)) + xoffset * 1 * tmp12 tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~rmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r0 % 64), rmask, other=0.0) tmp10 = tmp7 - tmp9 tmp11 = tl_math.exp(tmp10) tmp14 = tmp11.to(tl.float32) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp16 = tl.reduce(tmp15, 0, _triton_helper_fn_add0) tmp17 = triton_helpers.exclusive_scan_decoupled_lookback(tmp13, tmp16, tl.program_id(0), _triton_helper_fn_add0, DTYPE_VALUE_AS_UINT=tl. uint32, DTYPE_PACK=tl.uint64) tmp18 = tl.associative_scan(tmp15, 0, _triton_helper_fn_add0) tmp19 = _triton_helper_fn_add0(tmp17, tmp18) tmp20 = tl.where(roffset == 0, tmp18, tmp19) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp20, rmask) @triton.jit def triton_red_fused_add_log_mul_sub_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp12 = tl.load(in_ptr3 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) _tmp19 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp22 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (128 * x0 + r1 // 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~(rmask & xmask), 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp14 = tmp11 + tmp13 tmp15 = tmp7 - tmp14 tmp16 = tl.load(in_ptr4 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = _tmp19 + tmp18 _tmp19 = tl.where(rmask & xmask, tmp20, _tmp19) tmp21 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp23 = _tmp22 + tmp21 _tmp22 = tl.where(rmask & xmask, tmp23, _tmp22) tmp19 = tl.sum(_tmp19, 1)[:, None] tl.store(out_ptr0 + x0, tmp19, xmask) tmp22 = tl.sum(_tmp22, 1)[:, None] tl.store(out_ptr1 + x0, tmp22, xmask) @triton.jit def triton_per_fused_add_div_log_mul_neg_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp3 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg1_1, buf1, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((2,), (1,), torch.float32) triton_red_fused_max_1[grid(2)](buf1, arg0_1, buf2, 2, 8192, XBLOCK =1, RBLOCK=2048, num_warps=16, num_stages=1) buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_2[grid(1)](buf2, buf3, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((16384,), (1,), torch.float32) workspace = empty_strided_cuda((512,), (1,), torch.uint8) workspace.zero_() triton_spl_fused_cumsum_exp_sub_3[split_scan_grid(1, 16384)](buf1, arg0_1, buf3, buf4, workspace, 1, 16384, RBLOCK=2048, num_warps =16, num_stages=1) del workspace buf5 = buf2 del buf2 buf7 = empty_strided_cuda((2,), (1,), torch.float32) 
triton_red_fused_add_log_mul_sub_sum_4[grid(2)](buf1, arg0_1, buf4, buf3, arg2_1, buf5, buf7, 2, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del arg0_1 del arg2_1 del buf1 del buf4 buf6 = buf3 del buf3 buf9 = buf6 del buf6 triton_per_fused_add_div_log_mul_neg_sub_sum_5[grid(1)](buf9, buf5, buf7, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf5 del buf7 return buf9, def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) def cox_ph_loss(log_h: 'Tensor', durations: 'Tensor', events: 'Tensor', eps: 'float'=1e-07) ->Tensor: """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ idx = durations.sort(descending=True)[1] events = events[idx] log_h = log_h[idx] return cox_ph_loss_sorted(log_h, events, eps) class CoxPHLossNew(torch.nn.Module): """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards and R is the risk set, and d is event. We just compute a cumulative sum, and not the true Risk sets. This is a limitation, but simple and fast. """ def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
rohanshad/pycox
CoxPHLoss
false
16335
[ "BSD-2-Clause" ]
449
5483489d21f3441e53f78f9f8898ce607f41c632
https://github.com/rohanshad/pycox/tree/5483489d21f3441e53f78f9f8898ce607f41c632
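cox_ph_loss above differs from cox_ph_loss_sorted only in first argsorting by descending duration, which is why the compiled call() gains the extra triton_per_fused_sort_0 kernel producing int16 gather indices. A hedged sanity check, assuming a CUDA device and that CoxPHLoss and CoxPHLossNew from the listings above are in scope; the tolerance is a guess, since the fused reductions may accumulate in a different order than eager mode:

import torch

log_h = torch.rand(4, 4, 4, 4, device='cuda')
durations = torch.rand(4, 4, 4, 4, device='cuda')
events = torch.rand(4, 4, 4, 4, device='cuda')
eager = CoxPHLoss()(log_h, durations, events)
compiled = CoxPHLossNew()(log_h, durations, events)
# both are 0-dim tensors; expect agreement up to float32 reduction-order noise
assert torch.allclose(eager, compiled, atol=1e-4)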
MergeBlok
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, 
primals_6, 256, grid=grid(256), stream=stream0) del primals_6 return (buf4, primals_3, primals_5, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class MergeBlok(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, upsampled, shortcut): x = torch.cat([upsampled, shortcut], dim=1) x = self.conv1x1(x) x = F.relu(x) x = self.conv3x3(x) return x def get_inputs(): return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(256)](buf4, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf4, primals_3, primals_5, buf0, buf2 class MergeBlokNew(nn.Module): def __init__(self, in_channels, 
out_channels): super().__init__() self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, input_0, input_1): primals_3 = self.conv1x1.weight primals_4 = self.conv1x1.bias primals_5 = self.conv3x3.weight primals_6 = self.conv3x3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
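The bias-add kernels in this record (and the pointwise kernels throughout these dumps) share one template: a flat 1-D launch over the NCHW elements, with the channel index recovered as `offs // (H*W) % C` so a per-channel bias can be broadcast. A minimal standalone sketch of that pattern follows; the kernel name, block size, and launch shape are illustrative choices, not taken from the generated code.

import torch
import triton
import triton.language as tl

@triton.jit
def bias_add_kernel(x_ptr, bias_ptr, numel, hw, C, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < numel
    ch = offs // hw % C                  # channel of each flat NCHW element
    x = tl.load(x_ptr + offs, mask=mask)
    b = tl.load(bias_ptr + ch, mask=mask, eviction_policy='evict_last')
    tl.store(x_ptr + offs, x + b, mask=mask)   # in-place, like in_out_ptr0

x = torch.rand(4, 4, 4, 4, device='cuda')
bias = torch.rand(4, device='cuda')
bias_add_kernel[(triton.cdiv(x.numel(), 256),)](x, bias, x.numel(), 16, 4, BLOCK=256)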
robtu328/TextBPN
MergeBlok
false
16,336
[ "MIT" ]
49
225844770e0107817be9fb86d53f873fa3eb07ae
https://github.com/robtu328/TextBPN/tree/225844770e0107817be9fb86d53f873fa3eb07ae
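Each record's `call(...)` is meant to be numerically equivalent to the eager module's forward. A quick way to sanity-check a record like MergeBlok above is to compile the eager module with the inductor backend and compare outputs; this sketch assumes a CUDA device, and the tolerance is an arbitrary choice rather than part of the dataset.

import torch
import torch.nn.functional as F
from torch import nn

class MergeBlok(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, upsampled, shortcut):
        x = torch.cat([upsampled, shortcut], dim=1)   # channels: 1 + 3 = 4
        return self.conv3x3(F.relu(self.conv1x1(x)))

m = MergeBlok(4, 4).cuda()
a = torch.rand(4, 1, 4, 4, device='cuda')
b = torch.rand(4, 3, 4, 4, device='cuda')
compiled = torch.compile(m)                  # inductor is the default backend
assert torch.allclose(m(a, b), compiled(a, b), atol=1e-5)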
PatchMerge
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7d/c7d3urb7ems2pjtadoczuvfntkvhdu2ioqckmneablxert7oa3b4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # out_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_per_fused_native_layer_norm_0 = async_compile.triton('triton_per_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 
4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (64*x0)), tmp27, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 1, 1, 64), (64, 1, 256, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_per_fused_native_layer_norm_0.run(buf3, primals_1, primals_2, primals_3, buf0, buf4, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (4, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf5) return (reinterpret_tensor(buf5, (4, 1, 1, 4), (4, 4, 4, 1), 0), primals_1, buf0, buf3, reinterpret_tensor(buf4, (4, 64), (64, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def patchify(input, size): batch, height, width, dim = input.shape return input.view(batch, height // size, size, width // size, size, dim ).permute(0, 1, 3, 2, 4, 5).reshape(batch, height // size, width // size, -1) class PatchMerge(nn.Module): def __init__(self, in_dim, out_dim, window_size): super().__init__() self.window_size = window_size self.norm = nn.LayerNorm(in_dim * window_size * window_size) self.linear = nn.Linear(in_dim * window_size * window_size, out_dim, bias=False) def forward(self, input): out = patchify(input, self.window_size) out = self.norm(out) out = self.linear(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'window_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp27, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 1, 1, 64), (64, 1, 256, 1), torch.float32 ) get_raw_stream(0) triton_per_fused_native_layer_norm_0[grid(4)](buf3, primals_1, primals_2, primals_3, buf0, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (4, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf5) return reinterpret_tensor(buf5, (4, 1, 1, 4), (4, 4, 4, 1), 0 ), primals_1, buf0, buf3, reinterpret_tensor(buf4, (4, 64), (64, 1), 0 ), primals_4 def patchify(input, size): batch, height, width, dim = input.shape return input.view(batch, height // size, size, width // size, size, dim ).permute(0, 1, 3, 2, 4, 5).reshape(batch, height // size, width // size, -1) class PatchMergeNew(nn.Module): def __init__(self, in_dim, out_dim, window_size): super().__init__() self.window_size = window_size self.norm = nn.LayerNorm(in_dim * window_size * window_size) self.linear = nn.Linear(in_dim * window_size * window_size, out_dim, bias=False) def forward(self, input_0): primals_2 = 
self.norm.weight primals_3 = self.norm.bias primals_4 = self.linear.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
rosinality/vision-transformers-pytorch
PatchMerge
false
16,337
[ "MIT" ]
77
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
https://github.com/rosinality/vision-transformers-pytorch/tree/b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
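The load-bearing step in PatchMerge is the `patchify` reshape: with the record's shapes (batch 4, 4x4 spatial, dim 4, window_size 4) it folds each 4x4 window into a single 64-dim vector, which is why the layer-norm kernel reduces over rnumel = 64. A shape walk-through, using only values from the record:

import torch

x = torch.rand(4, 4, 4, 4)          # (batch, height, width, dim)
size = 4                            # window_size
b, h, w, d = x.shape
out = (x.view(b, h // size, size, w // size, size, d)
        .permute(0, 1, 3, 2, 4, 5)  # move the two window axes to the end
        .reshape(b, h // size, w // size, -1))
print(out.shape)                    # torch.Size([4, 1, 1, 64]) -> LayerNorm(64) -> Linear(64, 4)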
UpBlok
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cr/ccri7cffjdxhdcglzfvzhu4wpte5acu3pxionui5nbusnbkkpvyl.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_7, %primals_8, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # 
reuse # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf4, primals_6, 256, grid=grid(256), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf6, primals_8, 1024, grid=grid(1024), stream=stream0) del primals_8 return (buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class UpBlok(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1) def forward(self, upsampled, shortcut): x = torch.cat([upsampled, shortcut], dim=1) x = self.conv1x1(x) x = F.relu(x) x = self.conv3x3(x) x = F.relu(x) x = self.deconv(x) return x def get_inputs(): return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(256)](buf4, primals_6, 256, XBLOCK=128, num_warps=4, 
num_stages=1) del primals_6 buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(1024)](buf6, primals_8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 return buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4 class UpBlokNew(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1) def forward(self, input_0, input_1): primals_3 = self.conv1x1.weight primals_4 = self.conv1x1.bias primals_5 = self.conv3x3.weight primals_6 = self.conv3x3.bias primals_7 = self.deconv.weight primals_8 = self.deconv.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
robtu328/TextBPN
UpBlok
false
16,338
[ "MIT" ]
49
225844770e0107817be9fb86d53f873fa3eb07ae
https://github.com/robtu328/TextBPN/tree/225844770e0107817be9fb86d53f873fa3eb07ae
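UpBlok differs from MergeBlok only by the trailing ConvTranspose2d, which is where the 4x4 feature map becomes the 8x8 `buf5` asserted in `call`. The output size follows the standard transposed-convolution formula, checkable in a couple of lines:

import torch
from torch import nn

# H_out = (H_in - 1) * stride - 2 * padding + kernel_size
#       = (4 - 1) * 2   - 2 * 1       + 4            = 8
deconv = nn.ConvTranspose2d(4, 4, kernel_size=4, stride=2, padding=1)
print(deconv(torch.rand(4, 4, 4, 4)).shape)   # torch.Size([4, 4, 8, 8])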
LMCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/63/c63seyczb72s5izg3hnveogzau5c625kznds67hiulhylemxftir.py # Topologically Sorted Source Nodes: [logprob_select, mask], Original ATen: [aten.gather, aten.gt] # Source node to ATen node mapping: # logprob_select => gather # mask => gt # Graph fragment: # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg1_1, 1, %arg0_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) triton_poi_fused_gather_gt_0 = async_compile.triton('triton_poi_fused_gather_gt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*i64', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gather_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gather_gt_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 
4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.full([1], 0, tl.int64) tmp8 = tmp0 > tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [logprob_select, mask], Original ATen: [aten.gather, aten.gt] stream0 = get_raw_stream(0) triton_poi_fused_gather_gt_0.run(arg0_1, arg1_1, buf0, buf1, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data


class LMCriterion(nn.Module):

    def __init__(self):
        super(LMCriterion, self).__init__()

    def forward(self, input, target):
        logprob_select = torch.gather(input, 1, target)
        # Pre-0.4 sources wrapped the mask in Variable(mask, volatile=input.volatile);
        # Variables and tensors merged in PyTorch 0.4 and `volatile` was removed,
        # so the plain boolean mask is used directly here.
        mask = target.data.gt(0)
        out = torch.masked_select(logprob_select, mask)
        loss = -torch.sum(out)
        return loss


def get_inputs():
    return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gather_gt_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp7 = tl.full([1], 0, tl.int64) tmp8 = tmp0 > tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_gather_gt_0[grid(16)](arg0_1, arg1_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 class LMCriterionNew(nn.Module): def __init__(self): super(LMCriterionNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
roma-ghewari/visDial.pytorch
LMCriterion
false
16,339
[ "MIT" ]
123
03fe6e679170d54a985b6402f07fea4a5fb4dd73
https://github.com/roma-ghewari/visDial.pytorch/tree/03fe6e679170d54a985b6402f07fea4a5fb4dd73
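Note that the fused kernel only materializes the gathered log-probabilities (`buf0`) and the `target > 0` mask (`buf1`); the masked_select and sum stay in eager code, and the generated `LMCriterionNew.forward` returns only `output[0]`. The complete loss in current-PyTorch terms (the float log-probs here are stand-ins; the record's `get_inputs` uses int64 ones-tensors):

import torch

logprob = torch.log_softmax(torch.rand(4, 4), dim=1)   # stand-in log-probs
target = torch.randint(0, 4, (4, 4))                   # stand-in token ids
selected = logprob.gather(1, target)    # what buf0 holds
mask = target.gt(0)                     # what buf1 holds; 0 acts as the pad id
loss = -selected[mask].sum()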
PositionalEncodingGenerator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rn/crnibqzrcvjkcxz6ny2j3fyfteoj2sfxd5wrp6bsakdsktqpg4rb.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] # Source node to ATen node mapping: # out_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %permute), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_1, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class PositionalEncodingGenerator(nn.Module): def __init__(self, dim): super().__init__() self.proj = nn.Conv2d(dim, dim, 3, padding=1, bias=False, groups=dim) def forward(self, input): out = input.permute(0, 3, 1, 2) out = self.proj(out) + out out = out.permute(0, 2, 3, 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding =(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0) class PositionalEncodingGeneratorNew(nn.Module): def __init__(self, dim): super().__init__() self.proj = nn.Conv2d(dim, dim, 3, padding=1, bias=False, groups=dim) def forward(self, input_0): primals_2 = self.proj.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
rosinality/vision-transformers-pytorch
PositionalEncodingGenerator
false
16,340
[ "MIT" ]
77
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
https://github.com/rosinality/vision-transformers-pytorch/tree/b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
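PositionalEncodingGenerator is a depthwise-conv positional encoding (`groups=dim` makes the 3x3 conv per-channel). Notice that inductor never materializes the permutes: the `reinterpret_tensor` calls simply re-read `primals_1` with strides (64, 1, 16, 4), i.e. channels-last. An eager sketch of the same residual depthwise conv:

import torch
from torch import nn

dim = 4
proj = nn.Conv2d(dim, dim, 3, padding=1, bias=False, groups=dim)  # depthwise
x = torch.rand(4, 4, 4, dim)                 # (batch, H, W, dim)
nchw = x.permute(0, 3, 1, 2)                 # a view, no copy
out = (proj(nchw) + nchw).permute(0, 2, 3, 1)
print(out.shape)                             # torch.Size([4, 4, 4, 4])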
SILogLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l6/cl6t6trnewz2sgfaar2w4wzzoprs7nvch5ck3seujeoj2g76qz4q.py # Topologically Sorted Source Nodes: [input_1, log, log_1, g, var, mean, pow_1, mul, Dg, sqrt, mul_1], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten.log, aten.var, aten.mean, aten.pow, aten.sqrt] # Source node to ATen node mapping: # Dg => add_5 # g => sub_5 # input_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4 # log => log # log_1 => log_1 # mean => mean # mul => mul_5 # mul_1 => mul_6 # pow_1 => pow_1 # sqrt => sqrt # var => var # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 1.0), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_4,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%sub_5,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.15), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, %mul_5), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_5,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, 10), kwargs = {}) triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0 = async_compile.triton('triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] tmp44_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp44_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp44_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = (rindex // 4) % 4 r0 = rindex % 4 r2 = (rindex // 16) r3 = rindex tmp40 = tl.load(in_ptr1 + (r3), rmask, eviction_policy='evict_first', other=0.0) tmp0 = r1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1, 1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1, 1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = r0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*r2)), rmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*r2)), rmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*r2)), rmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*r2)), rmask, eviction_policy='evict_last', other=0.0) tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = 
tmp32 * tmp36 tmp38 = tmp31 + tmp37 tmp39 = tl_math.log(tmp38) tmp41 = tl_math.log(tmp40) tmp42 = tmp39 - tmp41 tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK]) tmp44_mean_next, tmp44_m2_next, tmp44_weight_next = triton_helpers.welford_reduce( tmp43, tmp44_mean, tmp44_m2, tmp44_weight, roffset == 0 ) tmp44_mean = tl.where(rmask, tmp44_mean_next, tmp44_mean) tmp44_m2 = tl.where(rmask, tmp44_m2_next, tmp44_m2) tmp44_weight = tl.where(rmask, tmp44_weight_next, tmp44_weight) tmp48 = _tmp47 + tmp43 _tmp47 = tl.where(rmask, tmp48, _tmp47) tl.store(in_out_ptr0 + (tl.broadcast_to(r3, [XBLOCK, RBLOCK])), tmp38, rmask) tmp44_tmp, tmp45_tmp, tmp46_tmp = triton_helpers.welford( tmp44_mean, tmp44_m2, tmp44_weight, 1 ) tmp44 = tmp44_tmp[:, None] tmp45 = tmp45_tmp[:, None] tmp46 = tmp46_tmp[:, None] tmp47 = tl.sum(_tmp47, 1)[:, None] tmp49 = 255.0 tmp50 = tmp45 / tmp49 tmp51 = 256.0 tmp52 = tmp47 / tmp51 tmp53 = tmp52 * tmp52 tmp54 = 0.15 tmp55 = tmp53 * tmp54 tmp56 = tmp50 + tmp55 tmp57 = libdevice.sqrt(tmp56) tmp58 = 10.0 tmp59 = tmp57 * tmp58 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp59, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf3 = empty_strided_cuda((), (), torch.float32) buf6 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [input_1, log, log_1, g, var, mean, pow_1, mul, Dg, sqrt, mul_1], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten.log, aten.var, aten.mean, aten.pow, aten.sqrt] stream0 = get_raw_stream(0) triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0.run(buf1, buf6, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf1 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data.distributed
import torch.nn as nn


class SILogLoss(nn.Module):

    def __init__(self):
        super(SILogLoss, self).__init__()
        self.name = 'SILog'

    def forward(self, input, target, mask=None, interpolate=True):
        if interpolate:
            input = nn.functional.interpolate(input, target.shape[-2:],
                mode='bilinear', align_corners=True)
        if mask is not None:
            input = input[mask]
            target = target[mask]
        g = torch.log(input) - torch.log(target)
        Dg = torch.var(g) + 0.15 * torch.pow(torch.mean(g), 2)
        return 10 * torch.sqrt(Dg)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
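The forward above is the scale-invariant log (SILog) depth loss. As a quick eager-mode sanity check (my addition, not part of the harvested record), the module should match Dg = var(g) + 0.15 * mean(g)**2 written out by hand; passing interpolate=False keeps the comparison free of resampling:

import torch

def check_silog_formula():
    # shapes mirror get_inputs(); the +0.1 offset keeps log() well defined
    pred = torch.rand(4, 4, 4, 4) + 0.1
    target = torch.rand(4, 4, 4, 4) + 0.1
    loss = SILogLoss()(pred, target, interpolate=False)
    g = torch.log(pred) - torch.log(target)
    expected = 10 * torch.sqrt(torch.var(g) + 0.15 * torch.mean(g) ** 2)
    assert torch.allclose(loss, expected)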
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data.distributed
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0(
        in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel,
        XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 256
    # launched on grid(1): a single program owns the whole 256-element reduction
    rbase = tl.arange(0, RBLOCK)[None, :]
    tmp44_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp44_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp44_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex // 4 % 4
        r0 = rindex % 4
        r2 = rindex // 16
        r3 = rindex
        tmp40 = tl.load(in_ptr1 + r3, rmask, eviction_policy='evict_first',
            other=0.0)
        # gather the four neighbours for the (here identity) 4x4 bilinear
        # resize of in_ptr0
        tmp0 = r1
        tmp1 = tmp0.to(tl.float32)
        tmp2 = 1.0
        tmp3 = tmp1 * tmp2
        tmp4 = 0.0
        tmp5 = triton_helpers.maximum(tmp3, tmp4)
        tmp6 = tmp5.to(tl.int32)
        tmp7 = tl.full([1, 1], 1, tl.int64)
        tmp8 = tmp6 + tmp7
        tmp9 = tl.full([1, 1], 3, tl.int64)
        tmp10 = triton_helpers.minimum(tmp8, tmp9)
        tmp11 = r0
        tmp12 = tmp11.to(tl.float32)
        tmp13 = tmp12 * tmp2
        tmp14 = triton_helpers.maximum(tmp13, tmp4)
        tmp15 = tmp14.to(tl.int32)
        tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * r2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp17 = tmp15 + tmp7
        tmp18 = triton_helpers.minimum(tmp17, tmp9)
        tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * r2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp20 = tmp19 - tmp16
        tmp21 = tmp15.to(tl.float32)
        tmp22 = tmp14 - tmp21
        tmp23 = triton_helpers.maximum(tmp22, tmp4)
        tmp24 = triton_helpers.minimum(tmp23, tmp2)
        tmp25 = tmp20 * tmp24
        tmp26 = tmp16 + tmp25
        tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * r2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * r2), rmask,
            eviction_policy='evict_last', other=0.0)
        tmp29 = tmp28 - tmp27
        tmp30 = tmp29 * tmp24
        tmp31 = tmp27 + tmp30
        tmp32 = tmp26 - tmp31
        tmp33 = tmp6.to(tl.float32)
        tmp34 = tmp5 - tmp33
        tmp35 = triton_helpers.maximum(tmp34, tmp4)
        tmp36 = triton_helpers.minimum(tmp35, tmp2)
        tmp37 = tmp32 * tmp36
        tmp38 = tmp31 + tmp37
        # g = log(resized input) - log(target), accumulated with Welford
        tmp39 = tl_math.log(tmp38)
        tmp41 = tl_math.log(tmp40)
        tmp42 = tmp39 - tmp41
        tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
        tmp44_mean_next, tmp44_m2_next, tmp44_weight_next = (triton_helpers
            .welford_reduce(tmp43, tmp44_mean, tmp44_m2, tmp44_weight,
            roffset == 0))
        tmp44_mean = tl.where(rmask, tmp44_mean_next, tmp44_mean)
        tmp44_m2 = tl.where(rmask, tmp44_m2_next, tmp44_m2)
        tmp44_weight = tl.where(rmask, tmp44_weight_next, tmp44_weight)
        tmp48 = _tmp47 + tmp43
        _tmp47 = tl.where(rmask, tmp48, _tmp47)
        tl.store(in_out_ptr0 + tl.broadcast_to(r3, [XBLOCK, RBLOCK]), tmp38,
            rmask)
    tmp44_tmp, tmp45_tmp, tmp46_tmp = triton_helpers.welford(tmp44_mean,
        tmp44_m2, tmp44_weight, 1)
    # only the m2 output feeds the variance; the mean and weight outputs of
    # the final Welford combine are unused here
    tmp45 = tmp45_tmp[:, None]
    tmp47 = tl.sum(_tmp47, 1)[:, None]
    # unbiased variance (m2 / 255) plus 0.15 * mean**2, then 10 * sqrt(...)
    tmp49 = 255.0
    tmp50 = tmp45 / tmp49
    tmp51 = 256.0
    tmp52 = tmp47 / tmp51
    tmp53 = tmp52 * tmp52
    tmp54 = 0.15
    tmp55 = tmp53 * tmp54
    tmp56 = tmp50 + tmp55
    tmp57 = libdevice.sqrt(tmp56)
    tmp58 = 10.0
    tmp59 = tmp57 * tmp58
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp59, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf3
        del buf3
        get_raw_stream(0)
        triton_red_fused__to_copy__unsafe_index_add_arange_clamp_log_mean_mul_pow_sqrt_sub_var_0[
            grid(1)](buf1, buf6, arg1_1, arg0_1, 1, 256, XBLOCK=1, RBLOCK=
            256, num_warps=8, num_stages=1)
        del arg0_1
        del arg1_1
        del buf1
    return buf6,


class SILogLossNew(nn.Module):

    def __init__(self):
        super(SILogLossNew, self).__init__()
        self.name = 'SILog'

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
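Reading the fused kernel above: the bilinear resize, both logs, a Welford running variance, and the final 10 * sqrt(var + 0.15 * mean**2) all happen in a single grid(1) reduction over the 256 elements (tmp45 / 255.0 is the unbiased variance, tmp47 / 256.0 the mean). A hedged CUDA smoke test comparing the compiled wrapper against the eager module from this record (it assumes both classes are in scope and a GPU is present; it is not part of the harvested code):

import torch

def check_compiled_silog():
    if not torch.cuda.is_available():
        return
    pred = torch.rand(4, 4, 4, 4, device='cuda') + 0.1
    target = torch.rand(4, 4, 4, 4, device='cuda') + 0.1
    eager = SILogLoss()(pred.clone(), target.clone())
    fused = SILogLossNew()(pred.clone(), target.clone())
    # the 4x4 -> 4x4 align_corners resize is an identity, and var/mean**2 are
    # sign-invariant in g, so only rounding-level drift is expected
    assert torch.allclose(eager, fused, atol=1e-5)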
rosivbus/aphantasia
SILogLoss
false
16341
[ "MIT" ]
579
e739f21721222c3ea99aff3324f293fa5c5dd36d
https://github.com/rosivbus/aphantasia/tree/e739f21721222c3ea99aff3324f293fa5c5dd36d
gumbel_sampler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hy/chyjmpcv3z6vjvroncwkwtppqtqhncwlrryqrabhulidplglgwcn.py # Topologically Sorted Source Nodes: [add_, log_, neg_, add__1, log__1, neg__1, add, y_1, max_1], Original ATen: [aten.add, aten.log, aten.neg, aten._softmax, aten.max] # Source node to ATen node mapping: # add => add_2 # add_ => add # add__1 => add_1 # log_ => log # log__1 => log_1 # max_1 => max_1 # neg_ => neg # neg__1 => neg_1 # y_1 => div_1, exp, sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-20), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 1e-20), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%log_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg_1), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%div_1, 1), kwargs = {}) triton_poi_fused__softmax_add_log_max_neg_0 = 
async_compile.triton('triton_poi_fused__softmax_add_log_max_neg_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_log_max_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_log_max_neg_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = 1e-20 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = -tmp4 tmp6 = tmp5 + tmp2 tmp7 = tl_math.log(tmp6) tmp8 = -tmp7 tmp9 = tmp0 + tmp8 tmp10 = 1.0 tmp11 = tmp9 * tmp10 tmp14 = tmp13 + tmp2 tmp15 = tl_math.log(tmp14) tmp16 = -tmp15 tmp17 = tmp16 + tmp2 tmp18 = tl_math.log(tmp17) tmp19 = -tmp18 tmp20 = tmp12 + tmp19 tmp21 = tmp20 * tmp10 tmp22 = triton_helpers.maximum(tmp11, tmp21) tmp25 = tmp24 + tmp2 tmp26 = tl_math.log(tmp25) tmp27 = -tmp26 tmp28 = tmp27 + tmp2 tmp29 = tl_math.log(tmp28) tmp30 = -tmp29 tmp31 = tmp23 + tmp30 tmp32 = tmp31 * tmp10 tmp33 = triton_helpers.maximum(tmp22, tmp32) tmp36 = tmp35 + tmp2 tmp37 = tl_math.log(tmp36) tmp38 = -tmp37 tmp39 = tmp38 + tmp2 tmp40 = tl_math.log(tmp39) tmp41 = -tmp40 tmp42 = tmp34 + tmp41 tmp43 = tmp42 * tmp10 tmp44 = triton_helpers.maximum(tmp33, tmp43) tmp45 = tmp11 - tmp44 tmp46 = 2.0 tmp47 = tmp45 * tmp46 tmp48 = tl_math.exp(tmp47) tmp49 = tmp21 - tmp44 tmp50 = tmp49 * tmp46 tmp51 = tl_math.exp(tmp50) tmp52 = tmp48 + tmp51 tmp53 = tmp32 - tmp44 tmp54 = tmp53 * tmp46 tmp55 = tl_math.exp(tmp54) tmp56 = tmp52 + tmp55 tmp57 = tmp43 - tmp44 tmp58 = tmp57 * tmp46 tmp59 = 
tl_math.exp(tmp58) tmp60 = tmp56 + tmp59 tmp61 = tmp48 / tmp60 tmp62 = tmp51 / tmp60 tmp63 = triton_helpers.maximum(tmp61, tmp62) tmp64 = tmp55 / tmp60 tmp65 = triton_helpers.maximum(tmp63, tmp64) tmp66 = tmp59 / tmp60 tmp67 = triton_helpers.maximum(tmp65, tmp66) tmp68 = tmp61 > tmp62 tmp69 = tmp61 == tmp62 tmp70 = tmp61 != tmp61 tmp71 = tmp62 != tmp62 tmp72 = tmp70 > tmp71 tmp73 = tmp68 | tmp72 tmp74 = tmp70 & tmp71 tmp75 = tmp69 | tmp74 tmp76 = tl.full([1], 0, tl.int64) tmp77 = tl.full([1], 1, tl.int64) tmp78 = tmp76 < tmp77 tmp79 = tmp75 & tmp78 tmp80 = tmp73 | tmp79 tmp81 = tl.where(tmp80, tmp61, tmp62) tmp82 = tl.where(tmp80, tmp76, tmp77) tmp83 = tmp81 > tmp64 tmp84 = tmp81 == tmp64 tmp85 = tmp81 != tmp81 tmp86 = tmp64 != tmp64 tmp87 = tmp85 > tmp86 tmp88 = tmp83 | tmp87 tmp89 = tmp85 & tmp86 tmp90 = tmp84 | tmp89 tmp91 = tl.full([1], 2, tl.int64) tmp92 = tmp82 < tmp91 tmp93 = tmp90 & tmp92 tmp94 = tmp88 | tmp93 tmp95 = tl.where(tmp94, tmp81, tmp64) tmp96 = tl.where(tmp94, tmp82, tmp91) tmp97 = tmp95 > tmp66 tmp98 = tmp95 == tmp66 tmp99 = tmp95 != tmp95 tmp100 = tmp66 != tmp66 tmp101 = tmp99 > tmp100 tmp102 = tmp97 | tmp101 tmp103 = tmp99 & tmp100 tmp104 = tmp98 | tmp103 tmp105 = tl.full([1], 3, tl.int64) tmp106 = tmp96 < tmp105 tmp107 = tmp104 & tmp106 tmp108 = tmp102 | tmp107 tmp109 = tl.where(tmp108, tmp95, tmp66) tmp110 = tl.where(tmp108, tmp96, tmp105) tl.store(out_ptr0 + (x0), tmp44, xmask) tl.store(out_ptr1 + (x0), tmp60, xmask) tl.store(out_ptr2 + (x0), tmp67, xmask) tl.store(out_ptr3 + (x0), tmp110, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jo/cjondp6nyqlle7pnoswba7trwqsobb3qpdwts6jbetiqo22f75xk.py # Topologically Sorted Source Nodes: [add_, log_, neg_, add__1, log__1, neg__1, add, y_1, y_hard, float_1, sub, y_2], Original ATen: [aten.add, aten.log, aten.neg, aten._softmax, aten.eq, aten._to_copy, aten.sub] # Source node to ATen node mapping: # add => add_2 # add_ => add # add__1 => add_1 # float_1 => convert_element_type # log_ => log # log__1 => log_1 # neg_ => neg # neg__1 => neg_1 # sub => sub_1 # y_1 => div_1, exp # y_2 => add_3 # y_hard => eq # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-20), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 1e-20), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%log_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg_1), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %eq : [num_users=1] = 
call_function[target=torch.ops.aten.eq.Tensor](args = (%div_1, %expand), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type, %div_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %div_1), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %neg_1), kwargs = {}) triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1 = async_compile.triton('triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1', 'mutated_arg_names': ['in_ptr1', 'out_ptr2'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp12 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = 1e-20 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = -tmp4 tmp6 = tmp5 + tmp2 tmp7 = tl_math.log(tmp6) tmp8 = -tmp7 tmp9 = tmp0 + tmp8 tmp10 = 1.0 tmp11 = tmp9 * tmp10 tmp13 = tmp11 - tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp20 = tmp18 == tmp19 tmp21 = tmp20.to(tl.float32) tmp22 = tmp21 - tmp18 tmp23 = tmp22 + tmp18 tl.store(out_ptr0 + (x2), tmp23, xmask) tl.store(out_ptr2 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [add_, log_, neg_, add__1, log__1, neg__1, add, y_1, max_1], Original ATen: [aten.add, aten.log, aten.neg, aten._softmax, aten.max] stream0 = get_raw_stream(0) triton_poi_fused__softmax_add_log_max_neg_0.run(arg1_1, arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_, log_, neg_, add__1, log__1, neg__1, add, y_1, y_hard, float_1, sub, y_2], Original ATen: [aten.add, aten.log, aten.neg, aten._softmax, aten.eq, aten._to_copy, aten.sub] triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1.run(arg1_1, arg0_1, buf0, buf1, buf2, buf4, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 del buf0 del buf1 del buf2 return (buf4, reinterpret_tensor(buf3, (1, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data


class gumbel_sampler(nn.Module):

    def __init__(self):
        super(gumbel_sampler, self).__init__()

    def forward(self, input, noise, temperature=0.5):
        eps = 1e-20
        noise.data.add_(eps).log_().neg_()
        noise.data.add_(eps).log_().neg_()
        y = (input + noise) / temperature
        # explicit dim replaces the deprecated implicit-dim softmax; for the
        # 2-D inputs used here it is the same axis the implicit form selects
        y = F.softmax(y, dim=-1)
        max_val, max_idx = torch.max(y, y.dim() - 1)
        y_hard = y == max_val.view(-1, 1).expand_as(y)
        y = (y_hard.float() - y).detach() + y
        return y, max_idx.view(1, -1)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
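The two in-place lines turn uniform noise u into a Gumbel(0, 1) sample, -log(-log(u + eps) + eps), and the last assignment to y is the straight-through estimator: the forward value is the hard one-hot mask while gradients flow through the soft softmax. A functional sketch of the same idea without the in-place mutation (illustrative only; the name gumbel_softmax_st is mine, not part of the record):

import torch
import torch.nn.functional as F

def gumbel_softmax_st(logits, temperature=0.5, eps=1e-20):
    u = torch.rand_like(logits)
    gumbel = -torch.log(-torch.log(u + eps) + eps)  # Gumbel(0, 1) noise
    y = F.softmax((logits + gumbel) / temperature, dim=-1)
    y_hard = F.one_hot(y.argmax(-1), y.size(-1)).to(y.dtype)
    # straight-through: hard value in the forward pass, soft gradient backward
    return (y_hard - y).detach() + y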
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_add_log_max_neg_0(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # per column: perturb the logit with -log(-log(noise + eps) + eps)
    tmp2 = 1e-20
    tmp3 = tmp1 + tmp2
    tmp4 = tl_math.log(tmp3)
    tmp5 = -tmp4
    tmp6 = tmp5 + tmp2
    tmp7 = tl_math.log(tmp6)
    tmp8 = -tmp7
    tmp9 = tmp0 + tmp8
    tmp10 = 1.0
    tmp11 = tmp9 * tmp10
    tmp14 = tmp13 + tmp2
    tmp15 = tl_math.log(tmp14)
    tmp16 = -tmp15
    tmp17 = tmp16 + tmp2
    tmp18 = tl_math.log(tmp17)
    tmp19 = -tmp18
    tmp20 = tmp12 + tmp19
    tmp21 = tmp20 * tmp10
    tmp22 = triton_helpers.maximum(tmp11, tmp21)
    tmp25 = tmp24 + tmp2
    tmp26 = tl_math.log(tmp25)
    tmp27 = -tmp26
    tmp28 = tmp27 + tmp2
    tmp29 = tl_math.log(tmp28)
    tmp30 = -tmp29
    tmp31 = tmp23 + tmp30
    tmp32 = tmp31 * tmp10
    tmp33 = triton_helpers.maximum(tmp22, tmp32)
    tmp36 = tmp35 + tmp2
    tmp37 = tl_math.log(tmp36)
    tmp38 = -tmp37
    tmp39 = tmp38 + tmp2
    tmp40 = tl_math.log(tmp39)
    tmp41 = -tmp40
    tmp42 = tmp34 + tmp41
    tmp43 = tmp42 * tmp10
    # tmp44: row max of the perturbed logits (softmax stabiliser)
    tmp44 = triton_helpers.maximum(tmp33, tmp43)
    tmp45 = tmp11 - tmp44
    tmp46 = 2.0
    tmp47 = tmp45 * tmp46
    tmp48 = tl_math.exp(tmp47)
    tmp49 = tmp21 - tmp44
    tmp50 = tmp49 * tmp46
    tmp51 = tl_math.exp(tmp50)
    tmp52 = tmp48 + tmp51
    tmp53 = tmp32 - tmp44
    tmp54 = tmp53 * tmp46
    tmp55 = tl_math.exp(tmp54)
    tmp56 = tmp52 + tmp55
    tmp57 = tmp43 - tmp44
    tmp58 = tmp57 * tmp46
    tmp59 = tl_math.exp(tmp58)
    # tmp60: row sum of exponentials; tmp61/62/64/66 are softmax values
    tmp60 = tmp56 + tmp59
    tmp61 = tmp48 / tmp60
    tmp62 = tmp51 / tmp60
    tmp63 = triton_helpers.maximum(tmp61, tmp62)
    tmp64 = tmp55 / tmp60
    tmp65 = triton_helpers.maximum(tmp63, tmp64)
    tmp66 = tmp59 / tmp60
    tmp67 = triton_helpers.maximum(tmp65, tmp66)
    # NaN-aware argmax over the four softmax values (mirrors torch.max)
    tmp68 = tmp61 > tmp62
    tmp69 = tmp61 == tmp62
    tmp70 = tmp61 != tmp61
    tmp71 = tmp62 != tmp62
    tmp72 = tmp70 > tmp71
    tmp73 = tmp68 | tmp72
    tmp74 = tmp70 & tmp71
    tmp75 = tmp69 | tmp74
    tmp76 = tl.full([1], 0, tl.int64)
    tmp77 = tl.full([1], 1, tl.int64)
    tmp78 = tmp76 < tmp77
    tmp79 = tmp75 & tmp78
    tmp80 = tmp73 | tmp79
    tmp81 = tl.where(tmp80, tmp61, tmp62)
    tmp82 = tl.where(tmp80, tmp76, tmp77)
    tmp83 = tmp81 > tmp64
    tmp84 = tmp81 == tmp64
    tmp85 = tmp81 != tmp81
    tmp86 = tmp64 != tmp64
    tmp87 = tmp85 > tmp86
    tmp88 = tmp83 | tmp87
    tmp89 = tmp85 & tmp86
    tmp90 = tmp84 | tmp89
    tmp91 = tl.full([1], 2, tl.int64)
    tmp92 = tmp82 < tmp91
    tmp93 = tmp90 & tmp92
    tmp94 = tmp88 | tmp93
    tmp95 = tl.where(tmp94, tmp81, tmp64)
    tmp96 = tl.where(tmp94, tmp82, tmp91)
    tmp97 = tmp95 > tmp66
    tmp98 = tmp95 == tmp66
    tmp99 = tmp95 != tmp95
    tmp100 = tmp66 != tmp66
    tmp101 = tmp99 > tmp100
    tmp102 = tmp97 | tmp101
    tmp103 = tmp99 & tmp100
    tmp104 = tmp98 | tmp103
    tmp105 = tl.full([1], 3, tl.int64)
    tmp106 = tmp96 < tmp105
    tmp107 = tmp104 & tmp106
    tmp108 = tmp102 | tmp107
    tmp110 = tl.where(tmp108, tmp96, tmp105)
    tl.store(out_ptr0 + x0, tmp44, xmask)
    tl.store(out_ptr1 + x0, tmp60, xmask)
    tl.store(out_ptr2 + x0, tmp67, xmask)
    tl.store(out_ptr3 + x0, tmp110, xmask)


@triton.jit
def triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1(in_ptr0,
        in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr2, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp12 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    # recompute the softmax value for this element and apply the
    # straight-through trick against the row max computed by kernel 0
    tmp2 = 1e-20
    tmp3 = tmp1 + tmp2
    tmp4 = tl_math.log(tmp3)
    tmp5 = -tmp4
    tmp6 = tmp5 + tmp2
    tmp7 = tl_math.log(tmp6)
    tmp8 = -tmp7
    tmp9 = tmp0 + tmp8
    tmp10 = 1.0
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 - tmp12
    tmp14 = 2.0
    tmp15 = tmp13 * tmp14
    tmp16 = tl_math.exp(tmp15)
    tmp18 = tmp16 / tmp17
    tmp20 = tmp18 == tmp19
    tmp21 = tmp20.to(tl.float32)
    tmp22 = tmp21 - tmp18
    tmp23 = tmp22 + tmp18
    tl.store(out_ptr0 + x2, tmp23, xmask)
    # write the transformed noise back into arg0_1 (the eager module mutates
    # noise in place)
    tl.store(out_ptr2 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        buf3 = empty_strided_cuda((4,), (1,), torch.int64)
        get_raw_stream(0)
        triton_poi_fused__softmax_add_log_max_neg_0[grid(4)](arg1_1,
            arg0_1, buf0, buf1, buf2, buf3, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax__to_copy_add_eq_log_neg_sub_1[grid(16)](
            arg1_1, arg0_1, buf0, buf1, buf2, buf4, arg0_1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        del buf0
        del buf1
        del buf2
    return buf4, reinterpret_tensor(buf3, (1, 4), (4, 1), 0)


class gumbel_samplerNew(nn.Module):

    def __init__(self):
        super(gumbel_samplerNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0], output[1]
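One detail worth flagging in call() above: arg0_1 is passed both as an input and as out_ptr2 of the second kernel, so the compiled path overwrites the noise tensor with -log(-log(noise + eps) + eps), reproducing the eager module's in-place noise.data mutation. It also means arg0_1, the wrapper's first argument, plays the role of the noise tensor. A hedged smoke test (it assumes CUDA and the gumbel_samplerNew class from this record; not part of the harvested code):

import torch

def check_compiled_gumbel():
    if not torch.cuda.is_available():
        return
    noise = torch.rand(4, 4, device='cuda')
    logits = torch.rand(4, 4, device='cuda')
    y, idx = gumbel_samplerNew()(noise, logits)  # noise first, per call()
    assert y.shape == (4, 4) and idx.shape == (1, 4)
    # the forward value of the straight-through output is a row-wise one-hot
    assert torch.allclose(y.sum(-1), torch.ones(4, device='cuda'))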
roma-ghewari/visDial.pytorch
gumbel_sampler
false
16342
[ "MIT" ]
123
03fe6e679170d54a985b6402f07fea4a5fb4dd73
https://github.com/roma-ghewari/visDial.pytorch/tree/03fe6e679170d54a985b6402f07fea4a5fb4dd73
MultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rm/crmjcbrhesyjltwjwo2gy5ktnw7trv24ctlurkfme6ykhtfquq32.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < 
xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rb/crbncgepp7pchewiviz2ecap4hkql77bxprjbw2ciuujmpu57s6c.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pq/cpqnfrogm4dnzim2vyszfmugd6fc43gfnmxicoezmiidejzudrdz.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, 
%amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bb/cbby6op7dmkjsypxm4o3urasth73g6q5oi4ddo6uk6dsuv6off2v.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # out_2 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/36/c36mdvdlmkr4g6rcquwj2pbp2ke2mrvmhx3r7akoexvrkzzfhdye.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to 
ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_5), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, primals_3, buf2, 16, 4, grid=grid(16, 4), stream=stream0) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3) buf4 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf0, primals_3, buf6, 16, 4, grid=grid(16, 4), stream=stream0) del buf0 del primals_3 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf10, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 return (buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class MultiHeadedAttention(nn.Module): def __init__(self, dim, n_head, bias=True, dropout=0): super().__init__() self.dim_head = dim // n_head self.n_head = n_head self.qkv = nn.Linear(dim, dim * 3, bias=bias) self.dropout = nn.Dropout(dropout) self.linear = nn.Linear(dim, dim) def forward(self, input): batch, length, dim = input.shape qkv = self.qkv(input).reshape(batch, length, 3, self.n_head, self. dim_head).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = q @ k.transpose(-2, -1) / math.sqrt(self.dim_head) attn = torch.softmax(attn, -1) attn = self.dropout(attn) out = attn @ v out = out.transpose(1, 2).reshape(batch, length, dim) out = self.linear(out) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'n_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def 
triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_3 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) 
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 triton_poi_fused_add_6[grid(64)](buf10, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttentionNew(nn.Module): def __init__(self, dim, n_head, bias=True, dropout=0): super().__init__() self.dim_head = dim // n_head self.n_head = n_head self.qkv = nn.Linear(dim, dim * 3, bias=bias) self.dropout = nn.Dropout(dropout) self.linear = nn.Linear(dim, dim) def forward(self, input_0): primals_2 = self.qkv.weight primals_3 = self.qkv.bias primals_4 = self.linear.weight primals_5 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
rosinality/vision-transformers-pytorch
MultiHeadedAttention
false
16343
[ "MIT" ]
77
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
https://github.com/rosinality/vision-transformers-pytorch/tree/b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
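A minimal cross-check sketch for the entry above, assuming the MultiHeadedAttention and MultiHeadedAttentionNew classes defined in it are in scope and a CUDA device is available (the generated kernels are CUDA-only); with shared weights the two modules should agree on the shapes produced by get_inputs():

import torch

# Hypothetical sanity check, not part of the original entry: with identical
# parameters, the eager module and the Inductor-compiled wrapper should match.
torch.manual_seed(0)
eager = MultiHeadedAttention(dim=4, n_head=4).cuda()
compiled = MultiHeadedAttentionNew(dim=4, n_head=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the qkv/linear weights

x = torch.rand(4, 4, 4, device='cuda')  # (batch, length, dim), as in get_inputs()
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-5))  # expected: True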
ConvWithBatchNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rl/crlzvualg2za225vkja5fjpnxss76chgi2glfm2hpmsgp2hm7zo3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = 
(xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ConvWithBatchNorm(nn.Module): def __init__(self, in_channels, out_channels, spacetime_ndim, kernel_size=3, normalization=None, activation='ReLU'): super(ConvWithBatchNorm, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.spacetime_ndim = spacetime_ndim self.kernel_size = kernel_size self.normalization = normalization self.activation = activation if spacetime_ndim == 2: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding='same') else: self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, padding='same') self.relu = nn.ReLU() self.swish = nn.SiLU() self.leaky_relu = nn.LeakyReLU(0.1) def forward(self, x): x = self.conv(x) if self.normalization == 'instance': x = self.instance_normalization(x) elif self.normalization == 'batch': x = self.batch_normalization(x) if self.activation == 'ReLU': x = self.relu(x) elif self.activation == 'swish': x = self.swish(x) elif self.activation == 'lrel': x = self.leaky_relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'spacetime_ndim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class ConvWithBatchNormNew(nn.Module): def __init__(self, in_channels, out_channels, spacetime_ndim, kernel_size=3, normalization=None, activation='ReLU'): super(ConvWithBatchNormNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.spacetime_ndim = spacetime_ndim self.kernel_size = kernel_size self.normalization = normalization self.activation = activation if spacetime_ndim == 2: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding='same') else: self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, padding='same') self.relu = nn.ReLU() self.swish = nn.SiLU() self.leaky_relu = nn.LeakyReLU(0.1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
royerloic/aydin
ConvWithBatchNorm
false
16344
[ "BSD-3-Clause" ]
78
f9c61a24030891d008c318b250da5faec69fcd7d
https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d
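One detail worth noting in the entry above: the eager module uses padding='same' while the compiled call hard-codes padding=(1, 1, 1). For a stride-1 convolution with an odd kernel size k the two agree, since 'same' padding is (k - 1) // 2 on each side. A small CPU-only illustration (generic PyTorch, not code from the entry):

import torch
import torch.nn.functional as F

# 'same' padding for stride 1 and kernel size 3 is exactly padding=1.
x = torch.rand(1, 4, 4, 4, 4)           # (N, C, D, H, W)
w = torch.rand(4, 4, 3, 3, 3)           # (out_ch, in_ch, kD, kH, kW)
y_same = F.conv3d(x, w, padding='same')
y_one = F.conv3d(x, w, padding=1)
print(torch.allclose(y_same, y_one))    # expected: True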
BasicConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/x6/cx6am3aci2lwqvjehz2z7zym4yenhm4zyrn5h4u6yclronjplabq.py # Topologically Sorted Source Nodes: [conv2d_1, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # instance_norm => add, rsqrt, var_mean # x => gt, mul_1, where # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.001), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: 
tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.001 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tmp31 = tmp30 > tmp26 tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp30, xmask) tl.store(out_ptr3 + (r2 + (16*x3)), tmp31, xmask) tl.store(out_ptr4 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward] triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1.run(buf3, primals_5, buf4, buf8, buf9, buf7, 16, 16, grid=grid(16), stream=stream0) del primals_5 return (buf8, primals_1, primals_3, primals_4, buf1, buf3, reinterpret_tensor(buf7, (16, ), (1, ), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class BasicConvBlock(nn.Module): def __init__(self, in_channels, out_channels): super(BasicConvBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1) self.norm = nn.InstanceNorm2d(out_channels) def forward(self, x): x = F.leaky_relu(self.norm(self.conv2(self.conv1(x))), negative_slope=0.001, inplace=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1( in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.001 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tmp31 = tmp30 > tmp26 tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp30, xmask) tl.store(out_ptr3 + (r2 + 16 * x3), tmp31, xmask) tl.store(out_ptr4 + x3, tmp24, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((1, 16, 1, 
1), (16, 1, 16, 16), torch.float32 ) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_leaky_relu_backward_1[ grid(16)](buf3, primals_5, buf4, buf8, buf9, buf7, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_5 return (buf8, primals_1, primals_3, primals_4, buf1, buf3, reinterpret_tensor(buf7, (16,), (1,), 0), buf9, reinterpret_tensor( buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class BasicConvBlockNew(nn.Module): def __init__(self, in_channels, out_channels): super(BasicConvBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1) self.norm = nn.InstanceNorm2d(out_channels) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
royerloic/aydin
BasicConvBlock
false
16345
[ "BSD-3-Clause" ]
78
f9c61a24030891d008c318b250da5faec69fcd7d
https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d
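For intuition, the fused Triton kernel in the entry above computes, after the second convolution, a per-(sample, channel) instance normalization over the 4x4 spatial grid (eps=1e-05) followed by leaky_relu with negative_slope=0.001. An eager restatement of that epilogue on random stand-in data (a sketch, not a drop-in replacement):

import torch
import torch.nn.functional as F

# Per-(N, C) normalization over the spatial dims, then leaky ReLU -- the same
# math the fused kernel performs per 16-element row.
y = torch.rand(4, 4, 4, 4)  # stand-in for the conv2 output, (N, C, H, W)
mean = y.mean(dim=(2, 3), keepdim=True)
var = y.var(dim=(2, 3), keepdim=True, unbiased=False)
out = F.leaky_relu((y - mean) * torch.rsqrt(var + 1e-05), negative_slope=0.001)
ref = F.leaky_relu(F.instance_norm(y, eps=1e-05), negative_slope=0.001)
print(torch.allclose(out, ref, atol=1e-6))  # expected: True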
DotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 
4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm] extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf3) del arg1_1 return (buf3, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class DotProductAttention(nn.Module): """ Dot product attention. Given a set of vector values, and a vector query, attention is a technique to compute a weighted sum of the values, dependent on the query. NOTE: Here we use the terminology in Stanford cs224n-2018-lecture11. """ def __init__(self): super(DotProductAttention, self).__init__() def forward(self, queries, values): """ Args: queries: N x To x H values : N x Ti x H Returns: output: N x To x H attention_distribution: N x To x Ti """ batch_size = queries.size(0) queries.size(2) input_lengths = values.size(1) attention_scores = torch.bmm(queries, values.transpose(1, 2)) attention_distribution = F.softmax(attention_scores.view(-1, input_lengths), dim=1).view(batch_size, -1, input_lengths) attention_output = torch.bmm(attention_distribution, values) return attention_output, attention_distribution def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf3) del arg1_1 return buf3, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) class DotProductAttentionNew(nn.Module): """ Dot product attention. Given a set of vector values, and a vector query, attention is a technique to compute a weighted sum of the values, dependent on the query. NOTE: Here we use the terminology in Stanford cs224n-2018-lecture11. """ def __init__(self): super(DotProductAttentionNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
rupeshshrestha123/end2end-asr-pytorch
DotProductAttention
false
16346
[ "MIT" ]
250
8aada8f7cbe90e1d0b05d505042d9e42b8e4dd52
https://github.com/rupeshshrestha123/end2end-asr-pytorch/tree/8aada8f7cbe90e1d0b05d505042d9e42b8e4dd52
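The pair of softmax kernels in the entry above is the standard numerically stable two-pass formulation: first exp(x - rowmax), then division by the row sum. A minimal restatement (generic PyTorch, not code from the entry):

import torch

# Two-pass stable softmax, as split across triton_poi_fused__softmax_0 / _1.
scores = torch.rand(16, 4)  # flattened (N * To, Ti) attention scores
expd = (scores - scores.max(dim=1, keepdim=True).values).exp()
probs = expd / expd.sum(dim=1, keepdim=True)
print(torch.allclose(probs, torch.softmax(scores, dim=1)))  # expected: True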
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nj/cnjqzm7hm3u6ggjfvpspnl6pii56jckgwol3ydau4qesjyoteutl.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %permute_1], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = (xindex // 32) x1 = (xindex // 8) % 4 x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x2) + (16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gj/cgjeqalj3tj2k5zxudyuzg5uvy3eelkmu2pzeffdkeicr4q7pgpz.py # Topologically Sorted Source Nodes: [energy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # energy => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f5/cf5347orlkm5ylmh4iouw6qvowwebewwg66dnqrhzt7tg7a4irp5.py # Topologically Sorted Source Nodes: [energy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # energy => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], 
True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2d/c2dr3b76oqwlimt46tz4bftt63mdbj2eclhl6o6li35qwyf5jdzo.py # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat_1 => repeat_1 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_5, [4, 1]), kwargs = {}) triton_poi_fused_repeat_3 = async_compile.triton('triton_poi_fused_repeat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wi/cwihcoqftbkppgwobea5wbaobihbvfvl6jipij3fbwihs3o366tl.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, 
primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [energy], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] triton_poi_fused_repeat_3.run(primals_5, buf4, 16, grid=grid(16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf6, buf7, 16, grid=grid(16), stream=stream0) return (reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf3, buf7, reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn from torch.nn import functional as F class Attention(nn.Module): def __init__(self, hidden_size): super(Attention, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.uniform_(-stdv, stdv) def forward(self, hidden, encoder_outputs): timestep = encoder_outputs.size(0) h = hidden.repeat(timestep, 1, 1).transpose(0, 1) encoder_outputs = encoder_outputs.transpose(0, 1) attn_energies = self.score(h, encoder_outputs) return F.relu(attn_energies).unsqueeze(1) def score(self, hidden, encoder_outputs): energy = F.softmax(self.attn(torch.cat([hidden, encoder_outputs], 2)), dim=0) energy = energy.transpose(1, 2) v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, 
xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_3[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0) del buf5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) return reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf3, buf7, reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, hidden_size): super(AttentionNew, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.uniform_(-stdv, stdv) def score(self, hidden, encoder_outputs): energy = F.softmax(self.attn(torch.cat([hidden, encoder_outputs], 2)), dim=0) energy = energy.transpose(1, 2) v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def forward(self, input_0, input_1): primals_5 = self.v primals_3 = self.attn.weight primals_4 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ronak-44/smiles-transformer
Attention
false
16347
[ "MIT" ]
154
8965ca6211da721a8b708d1b3fa567b1bfd907cf
https://github.com/ronak-44/smiles-transformer/tree/8965ca6211da721a8b708d1b3fa567b1bfd907cf
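A minimal usage sketch for the Attention entry above, assuming a CUDA device, the shapes from its get_inputs()/get_init_inputs() helpers, and that both classes from the fields above are in scope; variable names here are illustrative only:

import torch

# Shapes follow get_inputs(): hidden is (B, H) = (4, 4) and
# encoder_outputs is (T, B, H) = (4, 4, 4), with hidden_size=4.
attn = Attention(hidden_size=4).cuda()
hidden = torch.rand(4, 4, device='cuda')
encoder_outputs = torch.rand(4, 4, 4, device='cuda')
out = attn(hidden, encoder_outputs)  # (B, 1, T) = (4, 1, 4)

# The Inductor-generated AttentionNew takes the same inputs but feeds its
# parameters through call(); sharing weights makes the two comparable.
attn_new = AttentionNew(hidden_size=4).cuda()
attn_new.load_state_dict(attn.state_dict())
assert torch.allclose(out, attn_new(hidden, encoder_outputs), atol=1e-5)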
CPAMDec
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) 
tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j4/cj4f6qdb45emg4zrdv5vzxtw2vswpyt2rqyalr6mxgomzeyk55j5.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %view_6), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_7 del primals_8 buf8 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (4, 4, 16), (64, 1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_9, buf8, primals_1, buf9, 256, grid=grid(256), stream=stream0) return (buf9, primals_1, primals_3, primals_9, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, buf8, reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torchvision.datasets import * from torch.nn import Conv2d from torch.nn import Parameter from torch.nn import Linear from torch.nn import Softmax from torchvision.transforms import * class CPAMDec(Module): """ CPAM decoding module """ def __init__(self, in_channels): super(CPAMDec, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) self.conv_query = Conv2d(in_channels=in_channels, out_channels= in_channels // 4, kernel_size=1) self.conv_key = Linear(in_channels, in_channels // 4) self.conv_value = Linear(in_channels, in_channels) def forward(self, x, y): """ inputs : x : input feature(N,C,H,W) y:gathering centers(N,K,M) returns : out : compact position attention feature attention map: (H*W)*M """ m_batchsize, C, width, height = x.size() m_batchsize, K, _M = y.size() proj_query = self.conv_query(x).view(m_batchsize, -1, width * height ).permute(0, 2, 1) proj_key = self.conv_key(y).view(m_batchsize, K, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) attention = self.softmax(energy) proj_value = self.conv_value(y).permute(0, 2, 1) out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = self.scale * out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torchvision.datasets import * from torch.nn import Conv2d from torch.nn import Parameter from torch.nn import Linear from torch.nn import Softmax from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 
4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf3, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf7) del primals_7 del primals_8 buf8 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (4, 4, 16), (64, 1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_9, buf8, primals_1, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf9, primals_1, primals_3, primals_9, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, buf8, reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 16, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0) class CPAMDecNew(Module): """ CPAM decoding module """ def __init__(self, in_channels): super(CPAMDecNew, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) self.conv_query = Conv2d(in_channels=in_channels, out_channels= in_channels // 4, kernel_size=1) self.conv_key = Linear(in_channels, in_channels // 4) self.conv_value = Linear(in_channels, in_channels) def forward(self, input_0, input_1): primals_9 = self.scale primals_3 = self.conv_query.weight primals_4 = self.conv_query.bias primals_5 = self.conv_key.weight primals_6 = self.conv_key.bias primals_7 = self.conv_value.weight primals_8 = self.conv_value.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
ruijieren98/DANet
CPAMDec
false
16348
[ "MIT" ]
2190
e38d61e371179833c08888fd5a1ee444cf5bd875
https://github.com/ruijieren98/DANet/tree/e38d61e371179833c08888fd5a1ee444cf5bd875
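A usage sketch for the CPAMDec entry above, assuming the class from its python_code field is in scope and using the shapes documented in its forward() docstring; because self.scale is initialized with torch.zeros(1), the module is an exact identity at initialization, a common trick for residual attention blocks:

import torch

# x is the input feature map (N, C, H, W) and y the gathering centers
# (N, K, M), matching get_inputs(); in_channels=4 per get_init_inputs().
dec = CPAMDec(in_channels=4)
x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4)
out = dec(x, y)

# out = scale * attention_output + x, and scale starts at zero,
# so the block passes x through unchanged until scale is learned.
assert out.shape == x.shape
assert torch.allclose(out, x)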
ShiftedSoftplus
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ff/cfflcrfbkqug6wu6spd6cdudrcnssg32k3762qutew77yjljfd4b.py # Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub] # Source node to ATen node mapping: # softplus => exp, gt, log1p, where # sub => sub # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, 0.6931471824645996), kwargs = {}) triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471824645996 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.tensorboard class ShiftedSoftplus(nn.Module): def __init__(self): super().__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.tensorboard assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471824645996 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ShiftedSoftplusNew(nn.Module): def __init__(self): super().__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hengwei-chan/3D_SBDD
ShiftedSoftplus
false
16349
[ "MIT" ]
67
eda6d51aaf01ef25581a46920a25161678fab76d
https://github.com/hengwei-chan/3D_SBDD/tree/eda6d51aaf01ef25581a46920a25161678fab76d
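A quick numerical check for the ShiftedSoftplus entry above: softplus(0) = log(1 + e^0) = log 2, so subtracting the stored shift makes the activation vanish at the origin, and for large inputs it approaches x - log 2. A minimal sketch, assuming the class above is in scope:

import torch

ssp = ShiftedSoftplus()

# softplus(0) - log(2) == 0: the shifted activation passes through the origin.
x = torch.zeros(4, 4, 4, 4)
assert torch.allclose(ssp(x), torch.zeros_like(x), atol=1e-6)

# Well above softplus's linear threshold (20), softplus(x) returns x exactly,
# so the output is x minus the float32 constant for log(2).
big = torch.full((1,), 30.0)
assert torch.allclose(ssp(big), big - 0.6931471824645996)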
CausalConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pz/cpzxz6lkuih5yejx2jgx2k2tt7datj3a4ghmi4d5empkhgoz5dif.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [1], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 5) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5), (20, 5, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 80, grid=grid(80), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4, 4), (20, 5, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch class CausalConv1d(nn.Module): """A 1D causal convolution layer. Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions per step, and T is the number of steps. Output: (B, D_out, T), where B is the minibatch size, D_out is the number of dimensions in the output, and T is the number of steps. Arguments: in_channels (int): number of input channels out_channels (int): number of output channels """ def __init__(self, in_channels, out_channels, dilation=1): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, 2, padding= self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 5 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(80)](buf1, primals_2, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (20, 5, 1), 0 ), primals_1, primals_3 class CausalConv1dNew(nn.Module): """A 1D causal convolution layer. Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions per step, and T is the number of steps. Output: (B, D_out, T), where B is the minibatch size, D_out is the number of dimensions in the output, and T is the number of steps. Arguments: in_channels (int): number of input channels out_channels (int): number of output channels """ def __init__(self, in_channels, out_channels, dilation=1): super(CausalConv1dNew, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, 2, padding= self.padding, dilation=dilation) def forward(self, input_0): primals_1 = self.causal_conv.weight primals_2 = self.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
sagelywizard/snail
CausalConv1d
false
16,350
[ "MIT" ]
100
1c64787aa970c82f65c3c9d253531d1c2b1bee08
https://github.com/sagelywizard/snail/tree/1c64787aa970c82f65c3c9d253531d1c2b1bee08
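A sketch of how a row like this can be validated end to end, assuming a CUDA device (the generated kernels are CUDA-only), that both classes from this row are importable, and a tolerance of 1e-6 (an assumption, not part of the dataset):

import torch

torch.manual_seed(0)
eager = CausalConv1d(4, 4).cuda()
fused = CausalConv1dNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # identical weights, so outputs are comparable
x = torch.rand(4, 4, 4, device='cuda')
# call() reinterprets the (4, 4, 5) conv output as (4, 4, 4) with strides
# (20, 5, 1), which is exactly the [:, :, :-padding] slice in the eager forward.
assert torch.allclose(eager(x), fused(x), atol=1e-6)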
softCE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zb/czb62yoaczhirznmghd4cd2ubyclhlw7vjrb7og3tsiz7po6ednd.py # Topologically Sorted Source Nodes: [sub, exp_score, exp_score_1, scores_logp], Original ATen: [aten.sub, aten.exp, aten.mul, aten._log_softmax] # Source node to ATen node mapping: # exp_score => exp # exp_score_1 => mul # scores_logp => amax, sub_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %arg1_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_exp_mul_sub_0 = async_compile.triton('triton_poi_fused__log_softmax_exp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_exp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_exp_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (x2), xmask) tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gk/cgkk34ipw2bjcd5qqu65fal2ppmhrn6wmp6oz75jz736tzsrtapi.py # Topologically Sorted Source Nodes: [prob_score, neg, scores_logp, mul_1, CE, CE_1], Original ATen: [aten.div, aten.neg, aten._log_softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # CE => sum_3 # CE_1 => div_1 # mul_1 => mul_1 # neg => neg # prob_score => div # scores_logp => exp_1, log, sub_2, sum_2 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %expand_1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), None) tmp1 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (r2), None) tmp11 = tl.load(in_ptr1 + (4*r1), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 0.25 tmp29 = tmp27 * tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp29, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, exp_score, exp_score_1, scores_logp], Original ATen: [aten.sub, aten.exp, aten.mul, aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_exp_mul_sub_0.run(arg0_1, arg1_1, buf0, buf1, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [prob_score, neg, scores_logp, mul_1, CE, CE_1], Original ATen: [aten.div, aten.neg, aten._log_softmax, aten.mul, aten.sum] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf4, buf0, buf1, 1, 16, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 
4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init class softCE(nn.Module): """ The objective function for the distant supervised typing. Parameters ---------- if_average : ``bool``, optional, (default = True). Whether to average over batches or not. """ def __init__(self, if_average=True): super(softCE, self).__init__() self.logSoftmax = nn.LogSoftmax(dim=1) self.if_average = if_average @staticmethod def soft_max(vec, mask): """ Calculate the softmax for the input with regard to a mask. Parameters ---------- vec : ``torch.FloatTensor``, required. The input of the softmax. mask : ``torch.ByteTensor`` , required. The mask for the softmax input. """ batch_size = vec.size(0) max_score, _idx = torch.max(vec, 1, keepdim=True) exp_score = torch.exp(vec - max_score.expand_as(vec)) exp_score = exp_score * mask exp_score_sum = torch.sum(exp_score, 1).view(batch_size, 1).expand_as( exp_score) prob_score = exp_score / exp_score_sum return prob_score def forward(self, scores, target): """ Calculate the cross entropy loss for distant supervision. Parameters ---------- scores : ``torch.FloatTensor``, required. The input of the softmax. target : ``torch.ByteTensor`` , required. The target as the mask for the softmax input. """ supervision_p = softCE.soft_max(scores, target) scores_logp = self.logSoftmax(scores) CE = (-supervision_p * scores_logp).sum() if self.if_average: CE = CE / scores.size(0) return CE def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
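Restated as equations (B is the batch size, s the scores, m the nonnegative target mask; this is the forward pass above in math form):

\[
p_{ic} \;=\; \frac{m_{ic}\,\exp\!\big(s_{ic}-\max_{j} s_{ij}\big)}{\sum_{c'} m_{ic'}\,\exp\!\big(s_{ic'}-\max_{j} s_{ij}\big)},
\qquad
\mathrm{CE} \;=\; -\frac{1}{B}\sum_{i=1}^{B}\sum_{c} p_{ic}\,\log \operatorname{softmax}(s_i)_c .
\]

In the compiled version below, the first fused kernel produces the numerator of p and the max-shifted scores; the second performs the log-sum-exp, the division, and the reduction, folding the 1/B average into a multiply by 0.25 for this batch size.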
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_exp_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + x2, xmask) tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 * tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + r2, None) tmp11 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 0.25 tmp29 = tmp27 * tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_exp_mul_sub_0[grid(16)](arg0_1, arg1_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf4, buf0, buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf4, class softCENew(nn.Module): """ The objective function for the distant supervised typing. Parameters ---------- if_average : ``bool``, optional, (default = True). Whether to average over batches or not. """ def __init__(self, if_average=True): super(softCENew, self).__init__() self.logSoftmax = nn.LogSoftmax(dim=1) self.if_average = if_average @staticmethod def soft_max(vec, mask): """ Calculate the softmax for the input with regard to a mask. Parameters ---------- vec : ``torch.FloatTensor``, required. The input of the softmax. mask : ``torch.ByteTensor`` , required. The mask for the softmax input. """ batch_size = vec.size(0) max_score, _idx = torch.max(vec, 1, keepdim=True) exp_score = torch.exp(vec - max_score.expand_as(vec)) exp_score = exp_score * mask exp_score_sum = torch.sum(exp_score, 1).view(batch_size, 1).expand_as( exp_score) prob_score = exp_score / exp_score_sum return prob_score def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
s-tatsu/AutoNER
softCE
false
16,351
[ "Apache-2.0" ]
446
75f8d092a5bf83fabf4ac4e879fab9120bbcd083
https://github.com/s-tatsu/AutoNER/tree/75f8d092a5bf83fabf4ac4e879fab9120bbcd083
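A minimal parity sketch for this row, assuming a CUDA device and both classes above in scope; the inputs mirror get_inputs(), and the tolerance is an assumption:

import torch

torch.manual_seed(0)
scores = torch.rand(4, 4, device='cuda')
target = torch.rand(4, 4, device='cuda')   # soft mask, as in get_inputs()
loss_eager = softCE()(scores, target)      # pure-PyTorch reference
loss_fused = softCENew()(scores, target)   # two fused Triton kernels
assert loss_fused.dim() == 0               # scalar, already averaged over the batch
assert torch.allclose(loss_eager, loss_fused, atol=1e-6)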
AttnConnector
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsgi2mt4l2csxphaxovjmgng4w7kcopgj2oxi7cojv25ibumrxh.py # Topologically Sorted Source Nodes: [tiled_query, add, fc1], Original ATen: [aten.repeat, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # fc1 => tanh # tiled_query => repeat # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %view_1), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_repeat_tanh_0 = async_compile.triton('triton_poi_fused_add_repeat_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/r5/cr5af6fzsr2gmixtrj6ikns5kjfz34fn7v3fwkmuybwhq2ocdmex.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_4,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zd/czdeq2ohbgubcyeps2ukquvfhigxtyega57i24ketclusfgmyedi.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze_1, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 8), (8, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [query_embeded], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [tiled_query, add, fc1], Original ATen: [aten.repeat, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0.run(buf2, buf0, primals_6, 64, grid=grid(64), stream=stream0) del primals_6 buf4 = reinterpret_tensor(buf0, (16, 1), (1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, 
reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0), primals_9, out=buf6) del buf5 buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf6, primals_4, buf7, 32, grid=grid(32), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(primals_10, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf8) del primals_11 return (reinterpret_tensor(buf8, (1, 4, 4), (16, 4, 1), 0), primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2, buf4, buf7, primals_10, reinterpret_tensor(primals_9, (4, 4, 4), (16, 1, 4), 0), primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class AttnConnector(nn.Module): def __init__(self, rnn_cell, query_size, key_size, content_size, output_size, attn_size): super(AttnConnector, self).__init__() self.query_embed = nn.Linear(query_size, attn_size) self.key_embed = nn.Linear(key_size, attn_size) self.attn_w = nn.Linear(attn_size, 1) if rnn_cell == 'lstm': self.project_h = nn.Linear(content_size + query_size, output_size) self.project_c = nn.Linear(content_size + query_size, output_size) else: self.project = nn.Linear(content_size + query_size, output_size) self.rnn_cell = rnn_cell self.query_size = query_size self.key_size = key_size self.content_size = content_size self.output_size = output_size def forward(self, queries, keys, contents): batch_size = keys.size(0) num_key = keys.size(1) query_embeded = self.query_embed(queries) key_embeded = self.key_embed(keys) tiled_query = query_embeded.unsqueeze(1).repeat(1, num_key, 1) fc1 = F.tanh(tiled_query + key_embeded) attn = self.attn_w(fc1).squeeze(-1) attn = F.sigmoid(attn.view(-1, num_key)).view(batch_size, -1, num_key) mix = torch.bmm(attn, contents).squeeze(1) out = torch.cat([mix, queries], dim=1) if self.rnn_cell == 'lstm': h = self.project_h(out).unsqueeze(0) c = self.project_c(out).unsqueeze(0) new_s = h, c else: new_s = self.project(out).unsqueeze(0) return new_s def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'rnn_cell': 4, 'query_size': 4, 'key_size': 4, 'content_size': 4, 'output_size': 4, 'attn_size': 4}]
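The scoring here is additive (Bahdanau-style) attention with a sigmoid in place of the usual softmax over keys, so the weights act as independent gates rather than a normalized distribution. In math form, with each layer's bias written out:

\[
e_{bj} \;=\; \sigma\!\big(w^{\top}\tanh(W_q q_b + b_q + W_k k_{bj} + b_k) + b_w\big),
\qquad
\mathrm{mix}_b \;=\; \sum_{j} e_{bj}\, c_{bj},
\qquad
\mathrm{out}_b \;=\; W_p\,[\mathrm{mix}_b \,;\, q_b] + b_p .
\]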
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 8), (8, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0[grid(64)](buf2, buf0, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf4 = reinterpret_tensor(buf0, (16, 1), (1, 1), 0) del buf0 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), 
reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_8
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_sigmoid_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0),
            primals_9, out=buf6)
        del buf5
        buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_cat_2[grid(32)](buf6, primals_4, buf7, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
        del buf6
        extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(
            primals_10, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf8)
        del primals_11
    return reinterpret_tensor(buf8, (1, 4, 4), (16, 4, 1), 0
        ), primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), buf2, buf4, buf7, primals_10, reinterpret_tensor(primals_9, (4,
        4, 4), (16, 1, 4), 0), primals_7


class AttnConnectorNew(nn.Module):

    def __init__(self, rnn_cell, query_size, key_size, content_size,
        output_size, attn_size):
        super(AttnConnectorNew, self).__init__()
        self.query_embed = nn.Linear(query_size, attn_size)
        self.key_embed = nn.Linear(key_size, attn_size)
        self.attn_w = nn.Linear(attn_size, 1)
        if rnn_cell == 'lstm':
            self.project_h = nn.Linear(content_size + query_size, output_size)
            self.project_c = nn.Linear(content_size + query_size, output_size)
        else:
            self.project = nn.Linear(content_size + query_size, output_size)
        self.rnn_cell = rnn_cell
        self.query_size = query_size
        self.key_size = key_size
        self.content_size = content_size
        self.output_size = output_size

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.query_embed.weight
        primals_3 = self.query_embed.bias
        # In the traced graph, primals_4 is the queries tensor (it feeds the
        # query_embed addmm and the final concat in call()) and primals_5 is
        # the key_embed weight (it feeds the key-projection mm), so inputs
        # and parameters are mapped accordingly here.
        primals_5 = self.key_embed.weight
        primals_6 = self.key_embed.bias
        primals_7 = self.attn_w.weight
        primals_8 = self.attn_w.bias
        primals_10 = self.project.weight
        primals_11 = self.project.bias
        primals_4 = input_0
        primals_1 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
ruinunca/NeuralDialog-ZSDG
AttnConnector
false
16,352
[ "Apache-2.0" ]
132
c20359541036ea876a126d1c7c172b820476dcb2
https://github.com/ruinunca/NeuralDialog-ZSDG/tree/c20359541036ea876a126d1c7c172b820476dcb2
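A usage sketch for the compiled wrapper, assuming a CUDA device; 'gru' is an arbitrary stand-in here, since the traced graph only wires self.project and therefore requires any rnn_cell other than 'lstm' (get_init_inputs() above passes 4 for the same reason):

import torch

torch.manual_seed(0)
m = AttnConnectorNew('gru', query_size=4, key_size=4, content_size=4,
                     output_size=4, attn_size=4).cuda()
queries = torch.rand(4, 4, device='cuda')
keys = torch.rand(4, 4, 4, device='cuda')
contents = torch.rand(4, 4, 4, device='cuda')
new_s = m(queries, keys, contents)
assert new_s.shape == (1, 4, 4)   # (num_layers=1, batch, output_size), RNN-state layout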
DenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pz/cpzxz6lkuih5yejx2jgx2k2tt7datj3a4ghmi4d5empkhgoz5dif.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [1], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 5) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u6/cu6p4t7q5wtwgs5cpdhj7mpibqv5emb5qdj4v6x2yo45mtrk3dke.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %mul], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 8 x0 = xindex % 4 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (5*((-4) + x1)) + (20*x2)), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + (5*((-4) + x1)) + (20*x2)), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), 
dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5), (20, 5, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 80, grid=grid(80), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 5), (20, 5, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf3, primals_5, 80, grid=grid(80), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf1, buf3, buf4, 128, grid=grid(128), stream=stream0) return (buf4, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data
import torch
import torch.nn.functional as F


class CausalConv1d(nn.Module):
    """A 1D causal convolution layer.

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions per step, and T is the number of steps.

    Output: (B, D_out, T), where B is the minibatch size, D_out is the number
    of dimensions in the output, and T is the number of steps.

    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2, padding=
            self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


class DenseBlock(nn.Module):
    """Two parallel 1D causal convolution layers w/tanh and sigmoid activations

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions of the input, and T is the number of steps.

    Output: (B, D_in+F, T), where `B` is the minibatch size, `D_in` is the
    number of dimensions of the input, `F` is the number of filters, and `T`
    is the length of the input sequence.

    Arguments:
        in_channels (int): number of input channels
        filters (int): number of filters per channel
    """

    def __init__(self, in_channels, filters, dilation=1):
        super(DenseBlock, self).__init__()
        self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=
            dilation)
        self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=
            dilation)

    def forward(self, minibatch):
        tanh = F.tanh(self.causal_conv1(minibatch))
        sig = F.sigmoid(self.causal_conv2(minibatch))
        out = torch.cat([minibatch, tanh * sig], dim=1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'filters': 4}]
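The block is the WaveNet-style gated activation unit followed by a DenseNet-style concatenation; with f and g denoting causal_conv1 and causal_conv2, the forward above is

\[
\mathrm{out} \;=\; \big[\, x \;\Vert\; \tanh(f(x)) \odot \sigma(g(x)) \,\big]
\]

along the channel axis, which is why the output carries D_in + F channels.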
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 5 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 5 * (-4 + x1) + 20 * x2), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 5 * (-4 + x1) + 20 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(80)](buf1, primals_2, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 5), (20, 5, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(80)](buf3, primals_5, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_3, primals_4, buf1, buf3 class CausalConv1d(nn.Module): """A 1D causal convolution layer. Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions per step, and T is the number of steps. Output: (B, D_out, T), where B is the minibatch size, D_out is the number of dimensions in the output, and T is the number of steps. 
    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2, padding=
            self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


class DenseBlockNew(nn.Module):
    """Two parallel 1D causal convolution layers w/tanh and sigmoid activations

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions of the input, and T is the number of steps.

    Output: (B, D_in+F, T), where `B` is the minibatch size, `D_in` is the
    number of dimensions of the input, `F` is the number of filters, and `T`
    is the length of the input sequence.

    Arguments:
        in_channels (int): number of input channels
        filters (int): number of filters per channel
    """

    def __init__(self, in_channels, filters, dilation=1):
        super(DenseBlockNew, self).__init__()
        self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=
            dilation)
        self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=
            dilation)

    def forward(self, input_0):
        primals_1 = self.causal_conv1.causal_conv.weight
        primals_2 = self.causal_conv1.causal_conv.bias
        primals_4 = self.causal_conv2.causal_conv.weight
        primals_5 = self.causal_conv2.causal_conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
sagelywizard/snail
DenseBlock
false
16,353
[ "MIT" ]
100
1c64787aa970c82f65c3c9d253531d1c2b1bee08
https://github.com/sagelywizard/snail/tree/1c64787aa970c82f65c3c9d253531d1c2b1bee08
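A combined shape and parity sketch for this row, assuming a CUDA device and both classes above in scope (the tolerance is an assumption):

import torch

torch.manual_seed(0)
eager = DenseBlock(4, 4).cuda()
fused = DenseBlockNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
y_eager, y_fused = eager(x), fused(x)
assert y_eager.shape == (4, 8, 4)        # D_in + F = 8 channels, T preserved
assert torch.equal(y_eager[:, :4], x)    # the dense path copies the input through
assert torch.allclose(y_eager, y_fused, atol=1e-6)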
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # x => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cujzlgv5paggcqvowszmgikvspf42zdbqcmnleguvueceyuqsxsj.py # Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] # Source node to ATen node mapping: # x_1 => convolution # x_2 => gt, mul, where # x_3 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x4 = (xindex // 36) x2 = (xindex // 36) % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + 
((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x5), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dg/cdgipw5lidus6ejrmtsvhi3xspozx4gxsm7vdevnhdmi473ajide.py # Topologically Sorted Source Nodes: [x_4, iadd, x_5], Original ATen: [aten.convolution, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # iadd => add # x_4 => convolution_1 # x_5 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.01), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add, %mul_1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(in_out_ptr0 + (x3), tmp9, xmask) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qd/cqdnykuswbvikcl3ihtuuicoxuj426qxonufbkfpzudrdfgnbxqu.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_1 => convolution # x_2 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d] triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1.run(buf1, primals_3, buf2, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4, iadd, x_5], Original ATen: [aten.convolution, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2.run(buf4, primals_5, primals_1, buf5, 256, grid=grid(256), stream=stream0) del primals_1 del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf1, primals_3, buf6, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf4, primals_2, primals_4, buf0, buf2, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
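The reflection-pad kernel above collapses nn.ReflectionPad2d(1) into a single gather with the closed-form source index 15 - abs(-3 + abs(-1 + x0)) - 4*abs(-3 + abs(-1 + x1)) + 16*x2. A minimal standalone sanity check of that formula against F.pad (a verification sketch, not part of the generated file):

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
ref = F.pad(x, (1, 1, 1, 1), mode='reflect')  # (1, 1, 6, 6)

out = torch.empty(1, 1, 6, 6)
flat = x.reshape(-1)
for x1 in range(6):      # padded row
    for x0 in range(6):  # padded column
        # the closed-form index used by triton_poi_fused_reflection_pad2d_0
        idx = 15 - abs(-3 + abs(-1 + x0)) - 4 * abs(-3 + abs(-1 + x1))
        out[0, 0, x1, x0] = flat[idx]
assert torch.equal(out, ref)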
import torch from torch import nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm=None, residual=True, activation='leakyrelu', in_place_activation=True, transpose=False, reflectpad=True): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose self.reflectpad = reflectpad if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm is not None: if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=0 if self.reflectpad else 1) if self.activation == 'relu': self.actfun1 = nn.ReLU(inplace=in_place_activation) self.actfun2 = nn.ReLU(inplace=in_place_activation) elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU(inplace=in_place_activation) self.actfun2 = nn.LeakyReLU(inplace=in_place_activation) elif self.activation == 'elu': self.actfun1 = nn.ELU(inplace=in_place_activation) self.actfun2 = nn.ELU(inplace=in_place_activation) elif self.activation == 'selu': self.actfun1 = nn.SELU(inplace=in_place_activation) self.actfun2 = nn.SELU(inplace=in_place_activation) if self.reflectpad: self.rpad1 = nn.ReflectionPad2d(1) self.rpad2 = nn.ReflectionPad2d(1) def forward(self, x): ox = x if self.reflectpad: x = self.rpad1(x) x = self.conv1(x) if self.dropout: x = self.dropout1(x) x = self.actfun1(x) if self.norm1: x = self.norm1(x) if self.reflectpad: x = self.rpad2(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. shape[1], x.shape[1]), :, :] x = self.actfun2(x) if self.norm2: x = self.norm2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x5, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(in_out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = 
extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_1[grid(576)]( buf1, primals_3, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2[grid (256)](buf4, primals_5, primals_1, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(256) ](buf1, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1 ) del buf1 del primals_3 return buf4, primals_2, primals_4, buf0, buf2, buf5, buf6 class ConvBlockNew(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm=None, residual=True, activation='leakyrelu', in_place_activation=True, transpose=False, reflectpad=True): super(ConvBlockNew, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose self.reflectpad = reflectpad if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm is not None: if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=0 if self.reflectpad else 1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=0 if self.reflectpad else 1) if self.activation == 'relu': self.actfun1 = nn.ReLU(inplace=in_place_activation) self.actfun2 = nn.ReLU(inplace=in_place_activation) elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU(inplace=in_place_activation) self.actfun2 = nn.LeakyReLU(inplace=in_place_activation) elif self.activation == 'elu': self.actfun1 = nn.ELU(inplace=in_place_activation) self.actfun2 = nn.ELU(inplace=in_place_activation) elif self.activation == 'selu': self.actfun1 = nn.SELU(inplace=in_place_activation) self.actfun2 = nn.SELU(inplace=in_place_activation) if self.reflectpad: self.rpad1 = nn.ReflectionPad2d(1) self.rpad2 = nn.ReflectionPad2d(1) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
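The fused kernels in this record hard-code LeakyReLU as where(x > 0, x, 0.01 * x), which is nn.LeakyReLU at its default negative slope of 0.01, the activation the eager module constructs. A quick illustrative equivalence check (not from the source):

import torch
import torch.nn as nn

x = torch.randn(4, 4, 4, 4)
fused = torch.where(x > 0, x, x * 0.01)  # decomposition used by the kernels
assert torch.allclose(fused, nn.LeakyReLU()(x))  # default negative_slope=0.01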
royerloic/aydin
ConvBlock
false
16,354
[ "BSD-3-Clause" ]
78
f9c61a24030891d008c318b250da5faec69fcd7d
https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/74/c74cgsnsrc4jmxl2qqe5j2mmuphscf6e56iqf2iy24pro6mj73sp.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 
= tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torchvision.datasets import * import torch.nn.functional as F import torch.nn as nn from torchvision.transforms import * class Normalize(nn.Module): """Performs :math:`L_p` normalization of inputs over specified dimension. Does: .. math:: v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)} for each subtensor v over dimension dim of input. Each subtensor is flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix norm. With default arguments normalizes over the second dimension with Euclidean norm. Args: p (float): the exponent value in the norm formulation. Default: 2 dim (int): the dimension to reduce. Default: 1 """ def __init__(self, p=2, dim=1): super(Normalize, self).__init__() self.p = p self.dim = dim def forward(self, x): return F.normalize(x, self.p, self.dim, eps=1e-08) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
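The single fused kernel above implements the docstring formula for p=2, dim=1 on a (4, 4, 4, 4) input: the four loads at offsets x0, 16 + x0, 32 + x0 and 48 + x0 are the unrolled channel reduction, followed by sqrt, a clamp to eps, and the division. The same computation in plain PyTorch (a reference sketch, not part of the record):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
denom = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-08)  # max(||v||_2, eps)
assert torch.allclose(x / denom, F.normalize(x, p=2, dim=1, eps=1e-08))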
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torchvision.datasets import * import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): """Performs :math:`L_p` normalization of inputs over specified dimension. Does: .. math:: v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)} for each subtensor v over dimension dim of input. Each subtensor is flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix norm. With default arguments normalizes over the second dimension with Euclidean norm. Args: p (float): the exponent value in the norm formulation. Default: 2 dim (int): the dimension to reduce. Default: 1 """ def __init__(self, p=2, dim=1): super(NormalizeNew, self).__init__() self.p = p self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ruijieren98/DANet
Normalize
false
16,355
[ "MIT" ]
2,190
e38d61e371179833c08888fd5a1ee444cf5bd875
https://github.com/ruijieren98/DANet/tree/e38d61e371179833c08888fd5a1ee444cf5bd875
CausalConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/2a/c2aucoxj5ek5nl3pl5n67fq4vdl55x2rw66tvv2aq23fsb3jvf5q.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # out => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [3, 0, 3, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-3) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-3) + 
x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-15) + x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6r/c6rjd3rxthhw2ub6d2gtgjazrjeoyhalmj36ujwzgwexsis27e73.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) triton_per_fused__weight_norm_interface_1 = async_compile.triton('triton_per_fused__weight_norm_interface_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp9, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_0/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_1.run(buf2, primals_3, primals_2, buf3, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source 
Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf5, buf3, primals_2, primals_3, buf0, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
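triton_per_fused__weight_norm_interface_1 above computes w = v * (g / ||v||) with one persistent reduction per output channel (4 rows of 64 elements each). The decomposition it implements, checked against nn.utils.weight_norm (an illustrative sketch, not from the source):

import torch
import torch.nn as nn

conv = nn.utils.weight_norm(nn.Conv2d(4, 4, 4))  # registers weight_g, weight_v
v, g = conv.weight_v, conv.weight_g
# per output channel: w = v * (g / ||v||), the row-wise reduction in the kernel
w = v * (g / v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt())
assert torch.allclose(w, conv.weight)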
import torch from torch import nn class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out class CausalConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding='downright', activation=None): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * 2 self.kernel_size = kernel_size if padding == 'downright': pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0] elif padding == 'down' or padding == 'causal': pad = kernel_size[1] // 2 pad = [pad, pad, kernel_size[0] - 1, 0] self.causal = 0 if padding == 'causal': self.causal = kernel_size[1] // 2 self.pad = nn.ZeroPad2d(pad) self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride= stride, padding=0, activation=activation) def forward(self, input): out = self.pad(input) if self.causal > 0: self.conv.conv.weight_v.data[:, :, -1, self.causal:].zero_() out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
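For kernel_size=4 and padding='downright', the module zero-pads only the top and left by k - 1 = 3, so the padded map is 7x7 (hence xnumel = 4*4*7*7 = 784 in the pad kernel) and the valid 4x4 convolution returns to 4x4; each output pixel then depends only on inputs above and to the left of it. A shape check (illustrative only):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
padded = nn.ZeroPad2d([3, 0, 3, 0])(x)  # (left, right, top, bottom)
assert padded.shape == (4, 4, 7, 7)     # 4*4*7*7 = 784 elements, as in the kernel
assert nn.Conv2d(4, 4, 4)(padded).shape == (4, 4, 4, 4)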
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -3 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -3 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-15 + x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3, primals_2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 return buf5, buf3, 
primals_2, primals_3, buf0, buf2, buf3 class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out class CausalConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding='downright', activation=None): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * 2 self.kernel_size = kernel_size if padding == 'downright': pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0] elif padding == 'down' or padding == 'causal': pad = kernel_size[1] // 2 pad = [pad, pad, kernel_size[0] - 1, 0] self.causal = 0 if padding == 'causal': self.causal = kernel_size[1] // 2 self.pad = nn.ZeroPad2d(pad) self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride= stride, padding=0, activation=activation) def forward(self, input_0): primals_4 = self.conv.conv.bias primals_2 = self.conv.conv.weight_g primals_3 = self.conv.conv.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
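Given the argument layout in call() (primals_1 is padded as the input and freed right after, primals_3 is weight-normalized and saved for backward), a parity check between the eager and compiled modules should pass. It needs a CUDA device and assumes both classes from this record are importable side by side (a hypothetical harness, not part of the record):

import torch

m_ref = CausalConv2d(4, 4, 4).cuda()     # eager module from this record
m_new = CausalConv2dNew(4, 4, 4).cuda()  # compiled wrapper from this record
m_new.load_state_dict(m_ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m_ref(x), m_new(x), atol=1e-05)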
sajjad2014/vq-vae-2-pytorch
CausalConv2d
false
16,356
[ "MIT" ]
1,007
ef5f67c46f93624163776caec9e0d95063910eca
https://github.com/sajjad2014/vq-vae-2-pytorch/tree/ef5f67c46f93624163776caec9e0d95063910eca
SpatialRescaler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lr/clrdhm67vxdbhwd4sj3imhpdg2ompqphgvrdg44jue33scir6pix.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 2.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x2 = (xindex // 4) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + (x3), tmp44, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf3, arg0_1, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from functools import partial import torch.nn as nn class SpatialRescaler(nn.Module): def __init__(self, n_stages=1, method='bilinear', multiplier=0.5, in_channels=3, out_channels=None, bias=False): super().__init__() self.n_stages = n_stages assert self.n_stages >= 0 assert method in ['nearest', 'linear', 'bilinear', 'trilinear', 'bicubic', 'area'] self.multiplier = multiplier self.interpolator = partial(torch.nn.functional.interpolate, mode=method) self.remap_output = out_channels is not None if self.remap_output: self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias) def forward(self, x): for stage in range(self.n_stages): x = self.interpolator(x, scale_factor=self.multiplier) if self.remap_output: x = self.channel_mapper(x) return x def encode(self, x): return self(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from functools import partial import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf0 del buf0 buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid(64)](buf3, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf3, class SpatialRescalerNew(nn.Module): def __init__(self, n_stages=1, method='bilinear', multiplier=0.5, in_channels=3, out_channels=None, bias=False): super().__init__() self.n_stages = n_stages assert self.n_stages >= 0 assert method in ['nearest', 'linear', 'bilinear', 'trilinear', 'bicubic', 'area'] self.multiplier = multiplier self.interpolator = partial(torch.nn.functional.interpolate, mode=method) self.remap_output = out_channels is not None if self.remap_output: self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias) def encode(self, x): return self(x) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
samedii/latent-diffusion
SpatialRescaler
false
16,357
[ "MIT" ]
563
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
https://github.com/samedii/latent-diffusion/tree/f13bf9bf463d95b5a16aeadd2b02abde31f769f8
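A minimal sanity-check sketch for the SpatialRescaler record above (assumptions: a CUDA device is available and `call` is the compiled wrapper from the triton_code field). The fused kernel's (dst + 0.5) * 2 - 0.5 index arithmetic is the align_corners=False bilinear mapping, so its output should match eager F.interpolate on the same input:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
# eager reference: bilinear downscale by 2 with align_corners=False
eager = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False)
fused = call([x])[0]  # call() consumes its argument list (args.clear() above)
torch.testing.assert_close(fused, eager)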
UpsampleConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import math import torch from torchvision.datasets import * import torch.nn.functional as F from torch.nn import Parameter from torch.nn.modules.utils import _pair from torchvision.transforms import * class UpsampleConv2d(Module): """ To avoid the checkerboard artifacts of standard fractionally-strided convolution, we adopt an integer-stride convolution that produces :math:`2\\times 2` outputs for each convolutional window. .. image:: _static/img/upconv.png :width: 50% :align: center Reference: Hang Zhang and Kristin Dana. "Multi-style Generative Network for Real-time Transfer." *arXiv preprint arXiv:1703.06953 (2017)* Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 output_padding (int or tuple, optional): Zero-padding added to one side of the output. Default: 0 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If True, adds a learnable bias to the output. Default: True dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 scale_factor (int): scaling factor for upsampling convolution. Default: 1 Shape: - Input: :math:`(N, C_{in}, H_{in}, W_{in})` - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where :math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]` :math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]` Attributes: weight (Tensor): the learnable weights of the module of shape (scale * scale * out_channels, in_channels // groups, kernel_size[0], kernel_size[1]) bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels) Examples: >>> # With square kernels and equal stride >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2) >>> # non-square kernels and unequal stride and with padding >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) >>> input = autograd.Variable(torch.randn(20, 16, 50, 100)) >>> output = m(input) >>> # the exact output size can also be specified as an argument >>> input = autograd.Variable(torch.randn(1, 16, 12, 12)) >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1) >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1) >>> h = downsample(input) >>> h.size() torch.Size([1, 16, 6, 6]) >>> output = upsample(h, output_size=input.size()) >>> output.size() torch.Size([1, 16, 12, 12]) """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, scale_factor=1, bias=True): super(UpsampleConv2d, self).__init__() kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.scale_factor = scale_factor self.weight = Parameter(torch.Tensor(out_channels * scale_factor * scale_factor, in_channels // groups, *kernel_size)) if bias: self.bias =
Parameter(torch.Tensor(out_channels * scale_factor * scale_factor)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input): out = F.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return F.pixel_shuffle(out, self.scale_factor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torchvision.datasets import * from torch.nn import Parameter from torch.nn.modules.utils import _pair from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), primals_1, primals_3 class UpsampleConv2dNew(Module): """ To avoid the checkerboard artifacts of standard fractionally-strided convolution, we adopt an integer-stride convolution that produces :math:`2\\times 2` outputs for each convolutional window. .. image:: _static/img/upconv.png :width: 50% :align: center Reference: Hang Zhang and Kristin Dana. "Multi-style Generative Network for Real-time Transfer." *arXiv preprint arXiv:1703.06953 (2017)* Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 output_padding (int or tuple, optional): Zero-padding added to one side of the output. Default: 0 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If True, adds a learnable bias to the output. Default: True dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 scale_factor (int): scaling factor for upsampling convolution.
Default: 1 Shape: - Input: :math:`(N, C_{in}, H_{in}, W_{in})` - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where :math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]` :math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]` Attributes: weight (Tensor): the learnable weights of the module of shape (scale * scale * out_channels, in_channels // groups, kernel_size[0], kernel_size[1]) bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels) Examples: >>> # With square kernels and equal stride >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2) >>> # non-square kernels and unequal stride and with padding >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) >>> input = autograd.Variable(torch.randn(20, 16, 50, 100)) >>> output = m(input) >>> # the exact output size can also be specified as an argument >>> input = autograd.Variable(torch.randn(1, 16, 12, 12)) >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1) >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1) >>> h = downsample(input) >>> h.size() torch.Size([1, 16, 6, 6]) >>> output = upsample(h, output_size=input.size()) >>> output.size() torch.Size([1, 16, 12, 12]) """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, scale_factor=1, bias=True): super(UpsampleConv2dNew, self).__init__() kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.scale_factor = scale_factor self.weight = Parameter(torch.Tensor(out_channels * scale_factor * scale_factor, in_channels // groups, *kernel_size)) if bias: self.bias = Parameter(torch.Tensor(out_channels * scale_factor * scale_factor)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ruijieren98/DANet
UpsampleConv2d
false
16,358
[ "MIT" ]
2,190
e38d61e371179833c08888fd5a1ee444cf5bd875
https://github.com/ruijieren98/DANet/tree/e38d61e371179833c08888fd5a1ee444cf5bd875
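An illustrative sketch of the upsample-convolution trick the record above implements (shapes are hypothetical; it uses the eager UpsampleConv2d class from the python_code field): a stride-1 convolution emits scale**2 * out_channels feature maps, and pixel_shuffle rearranges them into a spatially scale-times-larger output.

import torch

m = UpsampleConv2d(16, 33, kernel_size=3, padding=1, scale_factor=2)
x = torch.randn(1, 16, 12, 12)
# conv2d output is (1, 33 * 2 * 2, 12, 12); pixel_shuffle(., 2) regroups
# each 4-channel block into a 2x2 spatial neighbourhood
print(m(x).shape)  # torch.Size([1, 33, 24, 24])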
CCAMDec
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3m/c3mxgkf4weymbmbgydi4j4i6eycdz2flzbf3jce3eapte2aqyfta.py # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] # Source node to ATen node mapping: # energy_new => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %bmm), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 
= tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (x2), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5e/c5e7z5qmoiqut4wygb4iv6xmv65bbiotnb64o5cgidinohzcyout.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %view_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0), xmask) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attention, out], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_1, primals_3, buf4, buf5, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torchvision.datasets import * from torch.nn import Parameter from torch.nn import Softmax from torchvision.transforms import * class CCAMDec(Module): """ CCAM decoding module """ def __init__(self): super(CCAMDec, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) def forward(self, x, y): """ inputs : x : input feature (N,C,H,W) y : gathering centers (N,K,H,W) returns : out : compact channel attention feature attention map : C x K """ m_batchsize, C, width, height = x.size() x_reshape = x.view(m_batchsize, C, -1) B, K, _W, _H = y.size() y_reshape = y.view(B, K, -1) proj_query = x_reshape proj_key = y_reshape.permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy attention = self.softmax(energy_new) proj_value = y.view(B, K, -1) out = torch.bmm(attention, proj_value) out = out.view(m_batchsize, C, width, height) out = x + self.scale * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torchvision.datasets import * from torch.nn import Parameter from torch.nn import Softmax from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_1, primals_3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf5, buf4 class CCAMDecNew(Module): """ CCAM decoding module """ def __init__(self): super(CCAMDecNew, self).__init__() self.softmax = Softmax(dim=-1) self.scale = Parameter(torch.zeros(1)) def forward(self, input_0, input_1): primals_3 = self.scale primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
ruijieren98/DANet
CCAMDec
false
16,359
[ "MIT" ]
2,190
e38d61e371179833c08888fd5a1ee444cf5bd875
https://github.com/ruijieren98/DANet/tree/e38d61e371179833c08888fd5a1ee444cf5bd875
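A small behavioural sketch for the CCAMDec record above (assuming the eager class from the python_code field): the C x K attention map is built from channel-flattened features, and because self.scale is zero-initialised the module is an exact identity at initialisation.

import torch

dec = CCAMDec()
x = torch.rand(4, 4, 4, 4)  # (N, C, H, W) input feature
y = torch.rand(4, 4, 4, 4)  # (N, K, H, W) gathering centers
out = dec(x, y)
# out = x + scale * attention_readout, and scale starts at zero,
# so the residual path dominates before any training
torch.testing.assert_close(out, x)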
TransposedUpsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nl/cnlja6aqebw6siiaulkdetdnrbx6gohzmt4yefxx6qbxqxop243x.py # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv_transpose2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 121) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 11, 11), (484, 121, 11, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 1936, grid=grid(1936), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class TransposedUpsample(nn.Module): """Learned 2x upsampling without padding""" def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2) def forward(self, x): return self.up(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 121 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 11, 11), (484, 121, 11, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1936)](buf1, primals_2, 1936, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class TransposedUpsampleNew(nn.Module): """Learned 2x upsampling without padding""" def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2) def forward(self, input_0): primals_1 = self.up.weight primals_2 = self.up.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
samedii/latent-diffusion
TransposedUpsample
false
16,360
[ "MIT" ]
563
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
https://github.com/samedii/latent-diffusion/tree/f13bf9bf463d95b5a16aeadd2b02abde31f769f8
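A quick size-arithmetic sketch for the TransposedUpsample record above (assuming the eager class from the python_code field): with kernel_size=5, stride=2, and no padding, ConvTranspose2d gives H_out = (H_in - 1) * 2 + 5, so a 4x4 input yields the 11x11 output asserted by assert_size_stride in the compiled wrapper.

import torch

up = TransposedUpsample(channels=4)
# (4 - 1) * 2 + 5 = 11 along each spatial dimension
print(up(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 11, 11])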
VeryFlatNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/34/c34d2mh5osomrpuofhcl5rcs4tv272pdsyor2yyfmx5t73lzww2k.py # Topologically Sorted Source Nodes: [cat, features], Original ATen: [aten.cat, aten.relu] # Source node to ATen node mapping: # cat => cat # features => relu # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %convolution], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%cat,), kwargs = {}) triton_poi_fused_cat_relu_0 = async_compile.triton('triton_poi_fused_cat_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2113536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 
129 x3 = (xindex // 129) x1 = (xindex // 129) % 4096 x2 = (xindex // 528384) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 129, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x1 + (4096*((-1) + x0)) + (524288*x2)), tmp6, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + ((-1) + x0), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tmp15 = tl.full([1], 0, tl.int32) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + (x4), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7b/c7btydfsxllmph2kxwoqx3uwsebekzs2idou3u6ojyln7mtvw5um.py # Topologically Sorted Source Nodes: [conv2d_1, features_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # features_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/m7/cm7pmgobq4fiekfe4oudsv4gplspdsqjhighfwrhe352mqkomko4.py # Topologically Sorted Source Nodes: [conv2d_2, features_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # features_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/v3/cv3linhi6nr3yne7ukhituzykff2lkodbbblzmjwtnp4jd7ntgat.py # Topologically Sorted Source Nodes: [conv2d_3, features_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # features_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/r5/cr5hsdzm3umxzyugh5gz7of3sqnlmvxlldfsvrwk3ihi24ceaxl2.py # Topologically Sorted Source Nodes: [prediction], Original ATen: [aten.convolution] # Source node to ATen node mapping: # prediction => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (128, 1, 9, 9), (81, 81, 9, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 129, 1, 1), (129, 1, 1, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (16, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (1, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 129, 64, 64), (528384, 1, 8256, 129), torch.float32) # Topologically Sorted Source Nodes: [cat, features], Original ATen: [aten.cat, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_cat_relu_0.run(primals_3, buf0, primals_2, buf1, 2113536, grid=grid(2113536), stream=stream0) del buf0 del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, features_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, features_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 64, 64), (65536, 1, 
1024, 16)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3, features_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf7, primals_9, 262144, grid=grid(262144), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [prediction], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 64, 64), (4096, 1, 64, 1)) buf9 = reinterpret_tensor(buf8, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [prediction], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_11, 16384, grid=grid(16384), stream=stream0) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 1, 9, 9), (81, 81, 9, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 129, 1, 1), (129, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from itertools import chain
import torch.nn.functional as F


class VeryFlatNet(nn.Module):

    def __init__(self, num_channels=128, kernel_size=9):
        super(VeryFlatNet, self).__init__()
        self.num_channels = num_channels
        padding = (kernel_size - 1) // 2
        self.convfeatures = nn.Conv2d(1, num_channels, groups=1,
            kernel_size=kernel_size, padding=padding, stride=1)
        channels = 1 + num_channels
        self.convp0 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convp1 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convp2 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convpf = nn.Conv2d(channels, 1, kernel_size=1, padding=0)

    def set_weights(self, weights, bias=0):
        # Copy the given numpy filters into the first `length` feature kernels.
        with torch.no_grad():
            length = weights.shape[0]
            self.convfeatures._parameters['weight'][0:length] = (
                torch.from_numpy(weights))

    def lastparameters(self):
        return chain(self.convp0.parameters(), self.convp1.parameters(),
            self.convp2.parameters(), self.convpf.parameters())

    def verylastparameters(self):
        # Fixed: the original referenced a nonexistent self.convp5;
        # convpf is the final layer.
        return self.convpf.parameters()

    def forward(self, x):
        y = self.convfeatures(x)
        features = F.relu(torch.cat((x, y), 1))
        features = F.relu(self.convp0(features))
        features = F.relu(self.convp1(features))
        features = F.relu(self.convp2(features))
        prediction = self.convpf(features)
        return prediction


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
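For orientation, a minimal eager-mode usage sketch (not part of the record itself; shapes follow get_inputs above — the 9x9 feature conv with padding 4 and the 1x1 head preserve the 64x64 resolution):

import torch

# Instantiate the eager model and run a forward pass on random input.
model = VeryFlatNet(num_channels=128, kernel_size=9)
x = torch.rand(4, 1, 64, 64)
prediction = model(x)
# The 1x1 "flat" head collapses the 129 concatenated channels back to one.
print(prediction.shape)  # torch.Size([4, 1, 64, 64])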
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from itertools import chain

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 129
    x3 = xindex // 129
    x1 = xindex // 129 % 4096
    x2 = xindex // 528384
    x4 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x3, tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 129, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x1 + 4096 * (-1 + x0) + 524288 * x2), tmp6,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.load(in_ptr2 + (-1 + x0), tmp6, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tmp15 = tl.full([1], 0, tl.int32)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tl.store(out_ptr0 + x4, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (128, 1, 9, 9), (81, 81, 9, 1))
    assert_size_stride(primals_2, (128,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 129, 1, 1), (129, 1, 1, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (32, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (16, 32, 1, 1), (32, 1, 1, 1))
    assert_size_stride(primals_9, (16,), (1,))
    assert_size_stride(primals_10, (1, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_11, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1))
        buf1 = empty_strided_cuda((4, 129, 64, 64), (528384, 1, 8256, 129),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_relu_0[grid(2113536)](primals_3, buf0,
            primals_2, buf1, 2113536, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_1[grid(1048576)](buf3, primals_5,
            1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 64, 64), (131072, 1, 2048, 32))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(524288)](buf5, primals_7,
            524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 16, 64, 64), (65536, 1, 1024, 16))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_3[grid(262144)](buf7, primals_9,
            262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 1, 64, 64), (4096, 1, 64, 1))
        buf9 = reinterpret_tensor(buf8, (4, 1, 64, 64), (4096, 4096, 64, 1), 0)
        del buf8
        triton_poi_fused_convolution_4[grid(16384)](buf9, primals_11,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf3, buf5, buf7)


class VeryFlatNetNew(nn.Module):

    def __init__(self, num_channels=128, kernel_size=9):
        super(VeryFlatNetNew, self).__init__()
        self.num_channels = num_channels
        padding = (kernel_size - 1) // 2
        self.convfeatures = nn.Conv2d(1, num_channels, groups=1,
            kernel_size=kernel_size, padding=padding, stride=1)
        channels = 1 + num_channels
        self.convp0 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convp1 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convp2 = nn.Conv2d(channels, channels // 2, kernel_size=1,
            padding=0)
        channels = channels // 2
        self.convpf = nn.Conv2d(channels, 1, kernel_size=1, padding=0)

    def set_weights(self, weights, bias=0):
        # Copy the given numpy filters into the first `length` feature kernels.
        with torch.no_grad():
            length = weights.shape[0]
            self.convfeatures._parameters['weight'][0:length] = (
                torch.from_numpy(weights))

    def lastparameters(self):
        return chain(self.convp0.parameters(), self.convp1.parameters(),
            self.convp2.parameters(), self.convpf.parameters())

    def verylastparameters(self):
        # Fixed: the original referenced a nonexistent self.convp5;
        # convpf is the final layer.
        return self.convpf.parameters()

    def forward(self, input_0):
        primals_1 = self.convfeatures.weight
        primals_2 = self.convfeatures.bias
        primals_4 = self.convp0.weight
        primals_5 = self.convp0.bias
        primals_6 = self.convp1.weight
        primals_7 = self.convp1.bias
        primals_8 = self.convp2.weight
        primals_9 = self.convp2.bias
        primals_10 = self.convpf.weight
        primals_11 = self.convpf.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
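One way to sanity-check the Inductor wrapper against the eager module is to share weights and compare outputs. This is a sketch, not part of the dataset record; it assumes a CUDA device (call() pins everything to cuda:0), that both classes are importable into one session, and it tolerates small reassociation error from the fused kernels:

import torch

torch.manual_seed(0)
eager = VeryFlatNet().cuda().eval()
compiled = VeryFlatNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical weights

x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    # Outputs should agree up to floating-point reassociation.
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)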
royerloic/aydin
VeryFlatNet
false
16,361
[ "BSD-3-Clause" ]
78
f9c61a24030891d008c318b250da5faec69fcd7d
https://github.com/royerloic/aydin/tree/f9c61a24030891d008c318b250da5faec69fcd7d
GEGLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lu/cluwa2zxstqa2ofccsmypwu2y4vyztng2sd4k6d245nfmzc7h2l4.py # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] # Source node to ATen node mapping: # gelu => add, erf, mul, mul_1, mul_2 # mul => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul_2), kwargs = {}) triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865476 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp10 = tmp0 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_gelu_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class GEGLU(nn.Module):

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
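For reference, the GEGLU forward written out by hand, using the toy shapes from get_init_inputs; this is exactly what the fused kernel below computes (chunk(2, dim=-1) splits the projection into a value half and a gate half):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
geglu = GEGLU(dim_in=4, dim_out=4)
x = torch.rand(4, 4, 4, 4)

# Manual computation: project to 2 * dim_out, split, gate with exact GELU.
proj = geglu.proj(x)
value, gate = proj[..., :4], proj[..., 4:]
manual = value * F.gelu(gate)

assert torch.allclose(geglu(x), manual)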
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = 0.7071067811865476
    tmp5 = tmp1 * tmp4
    tmp6 = libdevice.erf(tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 * tmp8
    tmp10 = tmp0 * tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (8, 4), (4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0
        ), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4)


class GEGLUNew(nn.Module):

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, input_0):
        primals_1 = self.proj.weight
        primals_2 = self.proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
samedii/latent-diffusion
GEGLU
false
16,362
[ "MIT" ]
563
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
https://github.com/samedii/latent-diffusion/tree/f13bf9bf463d95b5a16aeadd2b02abde31f769f8
ResizeGatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # h => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m2/cm2ow7tteupkylxa4oaf276y5mit4zfigwupzb7qbyrjcvmbebfg.py # Topologically Sorted Source Nodes: [h_1, conv2d_1, g, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # g => sigmoid # h_1 => convolution # out => mul_4 # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_4, %primals_5, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 169) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 13, 13), (676, 169, 13, 1)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 13, 13), (676, 169, 13, 1)) buf2 = buf1; del buf1 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1, conv2d_1, g, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] triton_poi_fused_convolution_mul_sigmoid_1.run(buf2, buf4, primals_3, primals_5, buf5, 2704, grid=grid(2704), stream=stream0) del primals_3 del primals_5 return (buf5, primals_2, primals_4, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class GatedConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size,
        stride, padding, dilation=1, activation=None):
        super(GatedConv2d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)
        self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, x):
        if self.activation is None:
            h = self.h(x)
        else:
            h = self.activation(self.h(x))
        g = self.sigmoid(self.g(x))
        return h * g


class ResizeGatedConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size,
        stride, padding, dilation=1, scale_factor=2, activation=None):
        super(ResizeGatedConv2d, self).__init__()
        self.activation = activation
        self.upsamplingNN = nn.Upsample(scale_factor=scale_factor)
        self.conv = GatedConv2d(input_channels, output_channels,
            kernel_size, stride, padding, dilation, activation=activation)

    def forward(self, x):
        h = self.upsamplingNN(x)
        out = self.conv(h)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
        4, 'stride': 1, 'padding': 4}]
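A short usage sketch (shapes follow get_init_inputs): the 4x4 input is nearest-neighbor upsampled to 8x8, and each 4x4 convolution with padding 4 then produces 13x13 maps that are gated elementwise by the sigmoid branch:

import torch

layer = ResizeGatedConv2d(input_channels=4, output_channels=4,
                          kernel_size=4, stride=1, padding=4)
x = torch.rand(4, 4, 4, 4)
out = layer(x)
print(out.shape)  # torch.Size([4, 4, 13, 13]): (8 + 2*4 - 4) / 1 + 1 = 13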
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1,
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 169 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tmp2 * tmp6
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(in_out_ptr1 + x3, tmp5, xmask)
    tl.store(out_ptr0 + x3, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 13, 13), (676, 169, 13, 1))
        buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
            padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 13, 13), (676, 169, 13, 1))
        buf2 = buf1
        del buf1
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1),
            torch.float32)
        triton_poi_fused_convolution_mul_sigmoid_1[grid(2704)](buf2, buf4,
            primals_3, primals_5, buf5, 2704, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_3
        del primals_5
    return buf5, primals_2, primals_4, buf0, buf2, buf4


class GatedConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size,
        stride, padding, dilation=1, activation=None):
        super(GatedConv2d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)
        self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, x):
        if self.activation is None:
            h = self.h(x)
        else:
            h = self.activation(self.h(x))
        g = self.sigmoid(self.g(x))
        return h * g


class ResizeGatedConv2dNew(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size,
        stride, padding, dilation=1, scale_factor=2, activation=None):
        super(ResizeGatedConv2dNew, self).__init__()
        self.activation = activation
        self.upsamplingNN = nn.Upsample(scale_factor=scale_factor)
        self.conv = GatedConv2d(input_channels, output_channels,
            kernel_size, stride, padding, dilation, activation=activation)

    def forward(self, input_0):
        # Fixed argument mapping: `call` upsamples primals_1, convolves the
        # result with primals_2 / primals_4, and adds primals_3 / primals_5 as
        # biases, so the input tensor must be primals_1. The generated code
        # scrambled this (it passed the input as a conv weight), which slipped
        # past the shape asserts only because every tensor here is (4, 4, 4, 4).
        primals_1 = input_0
        primals_2 = self.conv.h.weight
        primals_3 = self.conv.h.bias
        primals_4 = self.conv.g.weight
        primals_5 = self.conv.g.bias
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
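With the corrected primal mapping above, the compiled wrapper should reproduce the eager module. A minimal check sketch, assuming a CUDA device (call() pins to cuda:0) and that both classes are importable into one session:

import torch

torch.manual_seed(0)
eager = ResizeGatedConv2d(4, 4, kernel_size=4, stride=1, padding=4).cuda()
fused = ResizeGatedConv2dNew(4, 4, kernel_size=4, stride=1, padding=4).cuda()
fused.load_state_dict(eager.state_dict())  # share h/g conv weights

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # Both paths: nearest upsample -> gated conv; small float tolerance.
    assert torch.allclose(eager(x), fused(x), atol=1e-5)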
sanghiad/vae_vampprior
ResizeGatedConv2d
false
16,363
[ "MIT" ]
218
d24bc0c8781b7ee7b9570c2d560e43bceff50da4
https://github.com/sanghiad/vae_vampprior/tree/d24bc0c8781b7ee7b9570c2d560e43bceff50da4