Dataset columns (type, observed min–max across the dataset):
- entry_point: string, 1–65 chars
- original_triton_code: string, 4.5k–619k chars
- python_code: string, 208–60.9k chars
- triton_code: string, 1.15k–275k chars
- repo_name: string, 7–115 chars
- module_name: string, 1–65 chars
- synthetic: bool, 1 class
- uuid: int64, 0–18.5k
- licenses: sequence, 1–6 entries
- stars: int64, 0–19.8k
- sha: string, 40 chars
- repo_link: string, 72–180 chars
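One sample row follows, field by field. Below is a sketch of how rows with this schema could be loaded through the Hugging Face datasets library; the dataset path is a hypothetical placeholder, since none appears in this dump.

from datasets import load_dataset

ds = load_dataset("org/inductor-triton-pairs", split="train")  # hypothetical path
row = ds[0]
print(row["entry_point"])        # e.g. "EnDeWithPooling"
print(row["python_code"][:200])  # the source nn.Module
print(row["triton_code"][:200])  # the cleaned Triton kernels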
entry_point: EnDeWithPooling

original_triton_code:
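This field is the wrapper-plus-kernels dump that TorchInductor emits ahead of time for the module's forward pass. A minimal sketch of how such output is typically obtained follows; kernel names, autotune metadata, and cache paths vary with GPU and PyTorch version, and EnDeWithPooling here is the class defined in the python_code field further below.

import os
os.environ["TORCH_LOGS"] = "output_code"  # log the generated wrapper and kernels

import torch

model = EnDeWithPooling('selu', 'xavier', 4).cuda()
compiled = torch.compile(model)  # TorchInductor is the default backend
out = compiled(torch.rand(4, 4, 64, 64, device='cuda'))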
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5b/c5b7soflxobdhhzkim3kecmdxhe5yvpgrmlmq2zwhe7zsmzyed2c.py # Topologically Sorted Source Nodes: [conv2d, selu], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # selu => expm1, gt, mul, mul_1, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0507009873554805), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.7580993408473766), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wy/cwyx3wa4jndgnwzcjpr33hhlviahccyeckxfax46ztwjbjc22gd7.py # Topologically Sorted Source Nodes: [conv1_], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # conv1_ => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + 
((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kf/ckfdli7yjwk3c6ojm5hqpsdsli2fgswmnh33ygbopk7zwjgklb6w.py # Topologically Sorted Source Nodes: [conv2d_1, selu_1], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # selu_1 => expm1_1, gt_1, mul_3, mul_4, mul_5, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0507009873554805), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.7580993408473766), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_convolution_elu_2 = async_compile.triton('triton_poi_fused_convolution_elu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py # Topologically Sorted Source Nodes: [conv2_], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # conv2_ => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/su/csukxd5c6zcmjaz4vynojuv2mn7hyj4zxaikhjwfyrysdbqpzkud.py # Topologically Sorted Source Nodes: [conv2d_2, selu_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # selu_2 => expm1_2, gt_2, mul_6, mul_7, mul_8, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0507009873554805), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_7,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.7580993408473766), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_convolution_elu_4 = async_compile.triton('triton_poi_fused_convolution_elu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), 
None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a4/ca43wvja2n3mesrfuj54dcwx324bk23dhpnatmpi7kjryanvrx2z.py # Topologically Sorted Source Nodes: [conv3_], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # conv3_ => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), 
tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c4/cc4pcjn3iwkiegowujfyxnsqdjvzyqpxaxhkbsxgsqfltpbmhgzx.py # Topologically Sorted Source Nodes: [conv2d_3, intermediate_], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # intermediate_ => expm1_3, gt_3, mul_10, mul_11, mul_9, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0507009873554805), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_10,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.7580993408473766), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_convolution_elu_6 = async_compile.triton('triton_poi_fused_convolution_elu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') # 
kernel path: runs/run_shard_0/inductor_cache/i7/ci7zdbqfnevulfroz3py57lhtspaldzcd64usz2t57n6gfd23xny.py # Topologically Sorted Source Nodes: [conv_transpose2d, conv2d_4, selu_4, skip_deconv3_, deconv3_], Original ATen: [aten.convolution, aten.elu, aten.add] # Source node to ATen node mapping: # conv2d_4 => convolution_5 # conv_transpose2d => convolution_4 # deconv3_ => expm1_5, gt_5, mul_15, mul_16, mul_17, where_5 # selu_4 => expm1_4, gt_4, mul_12, mul_13, mul_14, where_4 # skip_deconv3_ => add # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %convolution_5 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 1.0507009873554805), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 1.0), kwargs = {}) # %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_13,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.7580993408473766), kwargs = {}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %mul_12, %mul_14), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %where_4), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0507009873554805), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {}) # %expm1_5 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_16,), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_5, 1.7580993408473766), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_15, %mul_17), kwargs = {}) triton_poi_fused_add_convolution_elu_7 = async_compile.triton('triton_poi_fused_add_convolution_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_elu_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_elu_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), None) tmp4 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(in_out_ptr1 + (x3), tmp22, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxe5gtcmbuknqumo5vcqgumqscdkgd7hgp7o5iu7qzvcob6taoz7.py # Topologically Sorted Source Nodes: [conv_transpose2d_1, conv2d_5, selu_6, skip_deconv2_, deconv2_], Original ATen: [aten.convolution, aten.elu, aten.add] # Source node to ATen node mapping: # conv2d_5 => convolution_7 # conv_transpose2d_1 => convolution_6 # deconv2_ => expm1_7, gt_7, mul_21, mul_22, mul_23, where_7 # selu_6 => expm1_6, gt_6, mul_18, mul_19, mul_20, where_6 # skip_deconv2_ => add_1 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_14, %primals_15, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %convolution_7 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 1.0507009873554805), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 1.0), kwargs = {}) # %expm1_6 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_19,), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_6, 1.7580993408473766), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %mul_18, %mul_20), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_6, %where_6), kwargs = {}) # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0507009873554805), kwargs = {}) # %mul_22 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {}) # %expm1_7 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_22,), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_7, 1.7580993408473766), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %mul_21, %mul_23), kwargs = {}) triton_poi_fused_add_convolution_elu_8 = async_compile.triton('triton_poi_fused_add_convolution_elu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_elu_8', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_elu_8(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), None) tmp4 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(in_out_ptr1 + (x3), tmp22, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qf/cqfepjaacssrdx3gpigwaemn7fa5exigc3jc26jzduom5cw7afzj.py # Topologically Sorted Source Nodes: [conv_transpose2d_2, deconv1_], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv_transpose2d_2 => convolution_8 # deconv1_ => expm1_8, gt_8, mul_24, mul_25, mul_26, where_8 # Graph fragment: # %convolution_8 : [num_users=3] = 
call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_18, %primals_19, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 1.0507009873554805), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 1.0), kwargs = {}) # %expm1_8 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_25,), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_8, 1.7580993408473766), kwargs = {}) # %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %mul_24, %mul_26), kwargs = {}) triton_poi_fused_convolution_elu_9 = async_compile.triton('triton_poi_fused_convolution_elu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a5/ca5vovie4nzzpjebj3wahhrwhrs7y474tmwedlvsy2srjecdlo5t.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution] # Source node to ATen node mapping: # score => convolution_9 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_8, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) 
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32, ), (1, )) assert_size_stride(primals_12, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_13, (32, ), (1, )) assert_size_stride(primals_14, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (16, ), (1, )) assert_size_stride(primals_16, (16, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_17, (16, ), (1, )) assert_size_stride(primals_18, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_19, (8, ), (1, )) assert_size_stride(primals_20, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_21, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = 
extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, selu], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [conv1_], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, selu_1], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_2.run(buf5, primals_5, 131072, grid=grid(131072), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [conv2_], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, selu_2], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_4.run(buf9, primals_7, 65536, grid=grid(65536), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.float32) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [conv3_], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_3, intermediate_], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_6.run(buf13, primals_9, 16384, grid=grid(16384), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1)) # Topologically Sorted Source 
Nodes: [conv2d_4], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf6, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 32, 16, 16), (8192, 256, 16, 1)) buf16 = buf15; del buf15 # reuse buf17 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d, conv2d_4, selu_4, skip_deconv3_, deconv3_], Original ATen: [aten.convolution, aten.elu, aten.add] triton_poi_fused_add_convolution_elu_7.run(buf16, buf17, primals_13, primals_11, 32768, grid=grid(32768), stream=stream0) del primals_11 del primals_13 # Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 32, 32), (16384, 1024, 32, 1)) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf2, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf20 = buf19; del buf19 # reuse buf21 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d_1, conv2d_5, selu_6, skip_deconv2_, deconv2_], Original ATen: [aten.convolution, aten.elu, aten.add] triton_poi_fused_add_convolution_elu_8.run(buf20, buf21, primals_17, primals_15, 65536, grid=grid(65536), stream=stream0) del primals_15 del primals_17 # Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d_2, deconv1_], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_9.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution] triton_poi_fused_convolution_10.run(buf25, primals_21, 16384, grid=grid(16384), stream=stream0) del primals_21 return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf16, buf17, buf20, buf21, buf23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = 
rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((32, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((1, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
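All of the fused *_elu_* kernels above apply SELU through the decomposition spelled out in the graph-fragment comments (gt, mul, expm1, mul, where): the positive branch is x * 1.0507009873554805 (the SELU scale) and the negative branch is expm1(x) * 1.7580993408473766, where the latter constant is scale * alpha (alpha ≈ 1.6732632423543772) folded into a single multiply. A quick equivalence check against torch.selu, as a sketch:

import torch

x = torch.randn(10000)
scale = 1.0507009873554805        # tmp5 in the kernels
scale_alpha = 1.7580993408473766  # tmp10; equals scale * 1.6732632423543772
manual = torch.where(x > 0, x * scale, torch.expm1(x) * scale_alpha)
assert torch.allclose(manual, torch.selu(x), atol=1e-6)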
python_code:

import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class EnDeWithPooling(nn.Module):

    def __init__(self, activation, initType, numChannels, batchnorm=False,
                 softmax=False):
        super(EnDeWithPooling, self).__init__()
        self.batchnorm = batchnorm
        self.bias = not batchnorm
        self.initType = initType
        self.activation = None
        self.numChannels = numChannels
        self.softmax = softmax
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        else:
            self.activation = nn.SELU(inplace=True)
        # Encoder: three 3x3 convs; each stage is conv -> activation -> 2x2 max-pool.
        self.conv1 = nn.Conv2d(self.numChannels, 16, 3, 1, 1, bias=self.bias)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, 1, bias=self.bias)
        self.conv3 = nn.Conv2d(32, 64, 3, 1, 1, bias=self.bias)
        # Decoder: stride-2 transposed convs undo the three pooling stages.
        self.deconv3 = nn.ConvTranspose2d(64, 32, 3, 2, 1, 1)
        self.deconv2 = nn.ConvTranspose2d(32, 16, 3, 2, 1, 1)
        self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, 1)
        self.classifier = nn.Conv2d(8, 1, 1)
        self.pool = nn.MaxPool2d(2, 2)
        self.intermediate = nn.Conv2d(64, 64, 1, 1, 0, bias=self.bias)
        # 1x1 convs applied to the encoder features before the skip additions.
        self.skip1 = nn.Conv2d(16, 16, 1, 1, 0, bias=self.bias)
        self.skip2 = nn.Conv2d(32, 32, 1, 1, 0, bias=self.bias)
        if self.batchnorm:
            self.bn1 = nn.BatchNorm2d(16)
            self.bn2 = nn.BatchNorm2d(32)
            self.bn3 = nn.BatchNorm2d(64)
            self.bn4 = nn.BatchNorm2d(32)
            self.bn5 = nn.BatchNorm2d(16)
            self.bn6 = nn.BatchNorm2d(8)

    def forward(self, x):
        if self.batchnorm:
            conv1_ = self.pool(self.bn1(self.activation(self.conv1(x))))
            conv2_ = self.pool(self.bn2(self.activation(self.conv2(conv1_))))
            conv3_ = self.pool(self.bn3(self.activation(self.conv3(conv2_))))
            intermediate_ = self.activation(self.intermediate(conv3_))
            skip_deconv3_ = self.deconv3(intermediate_) + self.activation(
                self.skip2(conv2_))
            deconv3_ = self.bn4(self.activation(skip_deconv3_))
            skip_deconv2_ = self.deconv2(deconv3_) + self.activation(
                self.skip1(conv1_))
            deconv2_ = self.bn5(self.activation(skip_deconv2_))
            deconv1_ = self.bn6(self.activation(self.deconv1(deconv2_)))
            score = self.classifier(deconv1_)
        else:
            conv1_ = self.pool(self.activation(self.conv1(x)))
            conv2_ = self.pool(self.activation(self.conv2(conv1_)))
            conv3_ = self.pool(self.activation(self.conv3(conv2_)))
            intermediate_ = self.activation(self.intermediate(conv3_))
            skip_deconv3_ = self.deconv3(intermediate_) + self.activation(
                self.skip2(conv2_))
            deconv3_ = self.activation(skip_deconv3_)
            skip_deconv2_ = self.deconv2(deconv3_) + self.activation(
                self.skip1(conv1_))
            deconv2_ = self.activation(skip_deconv2_)
            deconv1_ = self.activation(self.deconv1(deconv2_))
            if self.softmax:
                score = F.softmax(self.classifier(deconv1_), dim=1)
            else:
                score = self.classifier(deconv1_)
        return score

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if self.initType == 'default':
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, np.sqrt(2.0 / n))
                elif self.initType == 'xavier':
                    nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                if self.initType == 'default':
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, np.sqrt(2.0 / n))
                elif self.initType == 'xavier':
                    nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'activation': 4, 'initType': 4, 'numChannels': 4}]
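The compiled buffers mirror this module exactly: three pool stages shrink the 64x64 input to 8x8 while channels grow 4 -> 16 -> 32 -> 64, and three stride-2 transposed convs (each summed with a 1x1-projected skip feature) climb back to 64x64 before the single-channel classifier. A CPU smoke test, as a sketch:

import torch

net = EnDeWithPooling(activation='selu', initType='xavier', numChannels=4)
net.init_weights()
score = net(torch.rand(4, 4, 64, 64))
print(score.shape)  # torch.Size([4, 1, 64, 64]), matching buf25 in the compiled call()

triton_code: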
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_add_convolution_elu_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, None) tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 
1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp22, None) @triton.jit def triton_poi_fused_add_convolution_elu_8(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, None) tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp2 > tmp6 tmp8 = 1.0507009873554805 tmp9 = tmp2 * tmp8 tmp10 = 1.0 tmp11 = tmp2 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = 1.7580993408473766 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp7, tmp9, tmp14) tmp16 = tmp5 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tmp16 * tmp10 tmp20 = libdevice.expm1(tmp19) tmp21 = tmp20 * tmp13 tmp22 = tl.where(tmp17, tmp18, tmp21) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp22, None) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, None) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_13, (32,), (1,)) assert_size_stride(primals_14, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (16,), (1,)) assert_size_stride(primals_16, (16, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_17, (16,), (1,)) 
assert_size_stride(primals_18, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_19, (8,), (1,)) assert_size_stride(primals_20, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_21, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_elu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_elu_4[grid(65536)](buf9, primals_7, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. 
float32) buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf9, buf10, buf11, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_elu_6[grid(16384)](buf13, primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1)) buf15 = extern_kernels.convolution(buf6, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 32, 16, 16), (8192, 256, 16, 1)) buf16 = buf15 del buf15 buf17 = buf14 del buf14 triton_poi_fused_add_convolution_elu_7[grid(32768)](buf16, buf17, primals_13, primals_11, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 del primals_13 buf18 = extern_kernels.convolution(buf17, primals_14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf19 = extern_kernels.convolution(buf2, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf20 = buf19 del buf19 buf21 = buf18 del buf18 triton_poi_fused_add_convolution_elu_8[grid(65536)](buf20, buf21, primals_17, primals_15, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_elu_9[grid(131072)](buf23, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_10[grid(16384)](buf25, primals_21, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf16, buf17, buf20, buf21, buf23) class EnDeWithPoolingNew(nn.Module): def __init__(self, activation, initType, numChannels, batchnorm=False, softmax=False): super(EnDeWithPoolingNew, self).__init__() self.batchnorm = batchnorm self.bias = not batchnorm self.initType = initType self.activation = None self.numChannels = numChannels self.softmax = softmax if activation == 'relu': self.activation = nn.ReLU(inplace=True) else: self.activation = nn.SELU(inplace=True) self.conv1 = nn.Conv2d(self.numChannels, 16, 3, 1, 1, bias=self.bias) self.conv2 = nn.Conv2d(16, 32, 3, 1, 1, bias=self.bias) self.conv3 = nn.Conv2d(32, 64, 3, 1, 1, bias=self.bias) self.deconv3 = 
nn.ConvTranspose2d(64, 32, 3, 2, 1, 1) self.deconv2 = nn.ConvTranspose2d(32, 16, 3, 2, 1, 1) self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, 1) self.classifier = nn.Conv2d(8, 1, 1) self.pool = nn.MaxPool2d(2, 2) self.intermediate = nn.Conv2d(64, 64, 1, 1, 0, bias=self.bias) self.skip1 = nn.Conv2d(16, 16, 1, 1, 0, bias=self.bias) self.skip2 = nn.Conv2d(32, 32, 1, 1, 0, bias=self.bias) if self.batchnorm: self.bn1 = nn.BatchNorm2d(16) self.bn2 = nn.BatchNorm2d(32) self.bn3 = nn.BatchNorm2d(64) self.bn4 = nn.BatchNorm2d(32) self.bn5 = nn.BatchNorm2d(16) self.bn6 = nn.BatchNorm2d(8) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() if isinstance(m, nn.ConvTranspose2d): if self.initType == 'default': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, np.sqrt(2.0 / n)) elif self.initType == 'xavier': nn.init.xavier_normal_(m.weight.data) if m.bias is not None: m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_10 = self.deconv3.weight primals_11 = self.deconv3.bias primals_14 = self.deconv2.weight primals_15 = self.deconv2.bias primals_18 = self.deconv1.weight primals_19 = self.deconv1.bias primals_20 = self.classifier.weight primals_21 = self.classifier.bias primals_8 = self.intermediate.weight primals_9 = self.intermediate.bias primals_16 = self.skip1.weight primals_17 = self.skip1.bias primals_12 = self.skip2.weight primals_13 = self.skip2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
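A hedged equivalence check between the eager module and the Inductor-compiled wrapper above. It assumes a CUDA device (call() pins every buffer to cuda:0) and the exact 4x4x64x64 input shape that call() asserts:

import torch

torch.manual_seed(0)
eager = EnDeWithPooling('selu', 'xavier', 4).cuda()
compiled = EnDeWithPoolingNew('selu', 'xavier', 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameters in both modules

x = torch.rand(4, 4, 64, 64, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(torch.allclose(ref, out, atol=1e-5))    # expected True up to float tolerance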
talsperre/INFER
EnDeWithPooling
false
16564
[ "MIT" ]
56
38fb2356700c5a92991788b7eb9a267c99a07c5b
https://github.com/talsperre/INFER/tree/38fb2356700c5a92991788b7eb9a267c99a07c5b
SpatialDepthWiseSharedConvolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jb/cjbf3ssum7resbwampiwoknxcnzh4uzdy4fhoaakjojloew6qlw5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [2], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/e2/ce2f4ussgg6jfpqca2kweqtdut6siq46cus4r4zd4oeneykjlqi5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [2], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (64, 1, 6), (6, 6, 1)) del buf0 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: 
[aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 384, grid=grid(384), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0), primals_2, reinterpret_tensor(primals_1, (64, 1, 4), (1, 256, 64), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class SpatialDepthWiseSharedConvolution(Module):
    """
    ## Spatial Depth Wise Shared Convolution

    We share the same kernel across all channels.
    """

    def __init__(self, kernel_size: 'int'=3):
        super().__init__()
        self.kernel_size = kernel_size
        # A single 1-channel Conv1d whose kernel is shared across every
        # (batch, head, d_k) slice; padding k - 1 leaves room for a causal trim.
        self.conv = nn.Conv1d(in_channels=1, out_channels=1,
            kernel_size=(kernel_size,), padding=(kernel_size - 1,))

    def forward(self, x: 'torch.Tensor'):
        """
        `x` has shape `[seq_len, batch_size, heads, d_k]`
        """
        seq_len, batch_size, heads, d_k = x.shape
        x = x.permute(1, 2, 3, 0)
        x = x.view(batch_size * heads * d_k, 1, seq_len)
        x = self.conv(x)
        # Drop the trailing padded positions so each output step sees only
        # current and past inputs; guard kernel_size == 1, where the original
        # slice :-0 would return an empty tensor.
        if self.kernel_size > 1:
            x = x[:, :, :-(self.kernel_size - 1)]
        x = x.view(batch_size, heads, d_k, seq_len)
        x = x.permute(3, 0, 1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
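Padding by kernel_size - 1 and then trimming the same number of trailing positions makes the shared 1-D convolution causal along the sequence axis: step t only sees steps <= t. A shape-and-causality check (hypothetical usage):

import torch

conv = SpatialDepthWiseSharedConvolution(kernel_size=3)
x = torch.rand(7, 2, 4, 8)                 # [seq_len, batch_size, heads, d_k]
y = conv(x)
print(y.shape)                             # torch.Size([7, 2, 4, 8]): shape is preserved
# Feeding only a prefix reproduces the earlier outputs, confirming causality.
print(torch.allclose(conv(x[:5]), y[:5]))  # True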
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (64, 1, 6), (6, 6, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0 ), primals_2, reinterpret_tensor(primals_1, (64, 1, 4), (1, 256, 64), 0 ) class SpatialDepthWiseSharedConvolutionNew(Module): """ ## Spatial Depth Wise Shared Convolution We share the same kernel across all channels. """ def __init__(self, kernel_size: 'int'=3): """ """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=( kernel_size,), padding=(kernel_size - 1,)) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
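Note that call() returns reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0), a strided view rather than a contiguous tensor, so downstream code that assumes standard layout may want .contiguous(). A hedged equivalence sketch, assuming a CUDA device and the 4x4x4x4 input shape that call() asserts:

import torch

torch.manual_seed(0)
eager = SpatialDepthWiseSharedConvolution().cuda()
compiled = SpatialDepthWiseSharedConvolutionNew().cuda()
compiled.load_state_dict(eager.state_dict())       # share the Conv1d weights

x = torch.rand(4, 4, 4, 4, device='cuda')
out = compiled(x)
print(out.stride())                                # (1, 96, 24, 6): non-contiguous view
print(torch.allclose(eager(x), out.contiguous()))  # expected True up to float tolerance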
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWiseSharedConvolution
false
16565
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
DownSample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kv/ckvcp5yyimbwh53rkecse243qnmz6pvukh6fzqoc42qysp7ikta3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # x_1 => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0))))) + (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) * ((((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3k/c3kguicgoffxoot656zsenhhuwrflaxmmrcgimvhldpp36va3767.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # interpolate => _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, add_2, add_4, add_5, add_6, clamp_max_4, clamp_max_5, clamp_min_3, clamp_min_4, clamp_min_5, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_3, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {}) # %iota_3 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_3, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 2.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_3 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_3, torch.int64), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_1, [None, None, %clamp_max_2, %clamp_max_3]), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_1, [None, None, %clamp_max_2, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_3, %convert_element_type_3), kwargs = {}) # %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_4 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_4, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_4), kwargs = {}) # %add_5 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_3), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_1, [None, None, %convert_element_type_1, %clamp_max_3]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_4), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %convert_element_type_1), kwargs = {}) # %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_5, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_5), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x2 = (xindex // 4) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 
tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + (x3), tmp44, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad2d_0.run(arg0_1, buf0, 576, grid=grid(576), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.replication_pad2d, aten.convolution] buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf4 = buf2; del buf2 # reuse buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1.run(buf5, buf1, 64, grid=grid(64), stream=stream0) del buf1 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.functional
import torch.autograd


class Smooth(nn.Module):
    """
    <a id="smooth"></a>

    ### Smoothing Layer

    This layer blurs each channel
    """

    def __init__(self):
        super().__init__()
        kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]]
        kernel = torch.tensor([[kernel]], dtype=torch.float)
        kernel /= kernel.sum()
        self.kernel = nn.Parameter(kernel, requires_grad=False)
        self.pad = nn.ReplicationPad2d(1)

    def forward(self, x: 'torch.Tensor'):
        b, c, h, w = x.shape
        x = x.view(-1, 1, h, w)
        x = self.pad(x)
        x = F.conv2d(x, self.kernel)
        return x.view(b, c, h, w)


class DownSample(nn.Module):
    """
    <a id="down_sample"></a>

    ### Down-sample

    The down-sample operation [smooths](#smooth) each feature channel and
    scales it down $2 \times$ using bilinear interpolation.

    This is based on the paper
     [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486).
    """

    def __init__(self):
        super().__init__()
        self.smooth = Smooth()

    def forward(self, x: 'torch.Tensor'):
        x = self.smooth(x)
        return F.interpolate(x, (x.shape[2] // 2, x.shape[3] // 2), mode=
            'bilinear', align_corners=False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
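Because the normalized blur runs at full resolution before the bilinear resize to (H // 2, W // 2), high-frequency content is attenuated before subsampling, which is the anti-aliasing idea of the cited paper. A usage sketch (hypothetical):

import torch

down = DownSample()
x = torch.rand(4, 4, 4, 4)
print(down(x).shape)  # torch.Size([4, 4, 2, 2]): spatial dims halved
# The kernel sums to one and the padding replicates, so constant images are fixed points.
c = torch.ones(1, 1, 8, 8)
print(torch.allclose(down(c), torch.ones(1, 1, 4, 4)))  # True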
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(576)](arg0_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, 
stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1))
        del arg1_1
        del buf0
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf4 = buf2
        del buf2
        buf5 = buf4
        del buf4
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_1[
            grid(64)](buf5, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf1
    return buf5,


class Smooth(nn.Module):
    """
    <a id="smooth"></a>

    ### Smoothing Layer

    This layer blurs each channel
    """

    def __init__(self):
        super().__init__()
        kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]]
        kernel = torch.tensor([[kernel]], dtype=torch.float)
        kernel /= kernel.sum()
        self.kernel = nn.Parameter(kernel, requires_grad=False)
        self.pad = nn.ReplicationPad2d(1)

    def forward(self, x: 'torch.Tensor'):
        b, c, h, w = x.shape
        x = x.view(-1, 1, h, w)
        x = self.pad(x)
        x = F.conv2d(x, self.kernel)
        return x.view(b, c, h, w)


class DownSampleNew(nn.Module):
    """
    <a id="down_sample"></a>

    ### Down-sample

    The down-sample operation [smooths](#smooth) each feature channel and
    scales it down $2 \times$ using bilinear interpolation.

    This is based on the paper
     [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486).
    """

    def __init__(self):
        super().__init__()
        self.smooth = Smooth()

    def forward(self, input_0):
        arg1_1 = self.smooth.kernel
        arg0_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
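To see why blurring before subsampling matters, compare a naive stride-2 subsample with the compiled down-sample on two checkerboards that differ by a one-pixel shift: the naive subsample flips completely, while the smoothed result barely moves. A hypothetical demo (the checkerboard helper is not from the repo; assumes CUDA and the 4x4x4x4 shape that call() asserts):

import torch

def checkerboard(h, w, phase=0):
    # Alternating 0/1 pattern, optionally shifted by one pixel.
    i = torch.arange(h).view(-1, 1) + torch.arange(w).view(1, -1) + phase
    return (i % 2).float().view(1, 1, h, w)

down = DownSampleNew()
a = checkerboard(4, 4, 0).cuda().expand(4, 4, 4, 4).contiguous()
b = checkerboard(4, 4, 1).cuda().expand(4, 4, 4, 4).contiguous()
print((a[..., ::2, ::2] - b[..., ::2, ::2]).abs().max())  # tensor(1.): naive subsample flips
print((down(a) - down(b)).abs().max())                    # far below 1: the blur damps the flip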
techthiyanes/annotated_deep_learning_paper_implementations
DownSample
false
16566
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
Smooth
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kv/ckvcp5yyimbwh53rkecse243qnmz6pvukh6fzqoc42qysp7ikta3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # x_1 => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad2d_0 = async_compile.triton('triton_poi_fused_replication_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0))))) + (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) * ((((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) < (3)))) + (16*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad2d_0.run(arg0_1, buf0, 576, grid=grid(576), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.replication_pad2d, aten.convolution] buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
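The 3x3 kernel is the outer product of the binomial filter [1, 2, 1] with itself, normalized to sum to one, so the layer is a fixed separable blur with no learnable parameters. A quick check (hypothetical usage):

import torch

base = torch.tensor([1.0, 2.0, 1.0])
sep = torch.outer(base, base)
sep /= sep.sum()  # divides by 16

smooth = Smooth()
print(torch.allclose(smooth.kernel[0, 0], sep))  # True: identical normalized kernel
# Normalized weights plus replication padding keep constant images unchanged.
c = torch.full((1, 1, 5, 5), 0.25)
print(torch.allclose(smooth(c), c))              # True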
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 3)) + 16 * x2 + ( 3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(576)](arg0_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SmoothNew(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, input_0): arg1_1 = self.kernel arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
Smooth
false
16567
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
ATLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ft/cftz5ja4wgrkcx5pv7d7j6lunvnnmz6djadit33a2463r447sayy.py # Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.fill] # Source node to ATen node mapping: # setitem_1 => copy_1, full_default_2 # Graph fragment: # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_2, %full_default_2), kwargs = {}) # %copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%select_int, %copy_1), kwargs = {}) triton_poi_fused_fill_lift_fresh_0 = async_compile.triton('triton_poi_fused_fill_lift_fresh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_0', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) tmp0 = 0.0 tl.store(out_ptr0 + (x0 + (64*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cn/ccnjm2dqpzcrnbuww2omqd3latcfln56427nk6wxia5xlnc6b5c5.py # Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, n_mask, sub_3, mul_2, logit2, log_softmax_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, sub_3, sum_1 # log_softmax_1 => amax_1, exp_1, sub_7, sum_3 # logit1 => sub_2 # logit2 => sub_6 # mul => mul # mul_2 => mul_2 # n_mask => sub # p_mask => add # setitem => copy, full_default_1 # sub_1 => sub_1 # sub_3 => sub_5 # th_label => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {}) # %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select_scatter_default), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 1e+30), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_2, [-1], True), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 1e+30), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul_2), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_6, [-1], True), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_6, %amax_1), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_7,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) 
triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1 = async_compile.triton('triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp2 = x1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp5 = 1.0 tmp6 = 0.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp1 + tmp7 tmp9 = tmp5 - tmp8 tmp10 = 1e+30 tmp11 = tmp9 * tmp10 tmp12 = tmp0 - tmp11 tmp15 = tmp14 + tmp7 tmp16 = tmp5 - tmp15 tmp17 = tmp16 * tmp10 tmp18 = tmp13 - tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp22 = tmp21 + tmp7 tmp23 = tmp5 - tmp22 tmp24 = tmp23 * tmp10 tmp25 = tmp20 - tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp29 = tmp28 + tmp7 tmp30 = tmp5 - tmp29 tmp31 = tmp30 * tmp10 tmp32 = tmp27 - tmp31 tmp33 = triton_helpers.maximum(tmp26, tmp32) tmp34 = tmp12 - tmp33 tmp35 = tl_math.exp(tmp34) tmp36 = tmp18 - tmp33 tmp37 = tl_math.exp(tmp36) tmp38 = tmp35 + tmp37 tmp39 = tmp25 - tmp33 tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tmp42 = tmp32 - tmp33 tmp43 = tl_math.exp(tmp42) tmp44 = tmp41 + tmp43 tmp45 = tmp5 - tmp1 tmp46 = tmp5 - tmp45 tmp47 = tmp46 * tmp10 tmp48 = tmp0 - tmp47 
tmp49 = tmp5 - tmp14 tmp50 = tmp5 - tmp49 tmp51 = tmp50 * tmp10 tmp52 = tmp13 - tmp51 tmp53 = triton_helpers.maximum(tmp48, tmp52) tmp54 = tmp5 - tmp21 tmp55 = tmp5 - tmp54 tmp56 = tmp55 * tmp10 tmp57 = tmp20 - tmp56 tmp58 = triton_helpers.maximum(tmp53, tmp57) tmp59 = tmp5 - tmp28 tmp60 = tmp5 - tmp59 tmp61 = tmp60 * tmp10 tmp62 = tmp27 - tmp61 tmp63 = triton_helpers.maximum(tmp58, tmp62) tmp64 = tmp48 - tmp63 tmp65 = tl_math.exp(tmp64) tmp66 = tmp52 - tmp63 tmp67 = tl_math.exp(tmp66) tmp68 = tmp65 + tmp67 tmp69 = tmp57 - tmp63 tmp70 = tl_math.exp(tmp69) tmp71 = tmp68 + tmp70 tmp72 = tmp62 - tmp63 tmp73 = tl_math.exp(tmp72) tmp74 = tmp71 + tmp73 tl.store(out_ptr0 + (x3), tmp33, xmask) tl.store(out_ptr1 + (x3), tmp44, xmask) tl.store(out_ptr2 + (x3), tmp63, xmask) tl.store(out_ptr3 + (x3), tmp74, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cuj4drsaoeeafppqq56jc2dqbimn76sbovf5a42dyfiev4rt56n7.py # Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, mul_1, sum_1, loss1, n_mask, sub_3, mul_2, logit2, log_softmax_1, mul_3, sum_2, loss2, loss, loss_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax, aten.sum, aten.neg, aten.mean] # Source node to ATen node mapping: # log_softmax => log, sub_3, sub_4 # log_softmax_1 => amax_1, log_1, sub_7, sub_8 # logit1 => sub_2 # logit2 => sub_6 # loss => add_1 # loss1 => neg # loss2 => neg_1 # loss_1 => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # n_mask => sub # p_mask => add # setitem => copy, full_default_1 # sub_1 => sub_1 # sub_3 => sub_5 # sum_1 => sum_2 # sum_2 => sum_4 # th_label => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {}) # %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select_scatter_default), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 1e+30), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %amax), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %log), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # 
%sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 1e+30), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul_2), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub_6, [-1], True), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_6, %amax_1), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_7, %log_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %select_scatter_default), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_4,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %neg_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {}) triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2 = async_compile.triton('triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = (rindex // 16) r4 = rindex % 16 r1 = (rindex // 4) % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r4 + (64*r2)), None) tmp1 = tl.load(in_ptr1 + (r4 + (64*r2)), None) tmp12 = tl.load(in_ptr2 + (r1 + (16*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (r1 + (16*r2)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), None) tmp19 = tl.load(in_ptr1 + (16 + r4 + (64*r2)), None) tmp27 = tl.load(in_ptr2 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), None) tmp35 = tl.load(in_ptr1 + (32 + r4 + (64*r2)), None) tmp43 = tl.load(in_ptr2 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), None) tmp51 = tl.load(in_ptr1 + (48 + r4 + (64*r2)), None) tmp59 = tl.load(in_ptr2 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp61 = tl.load(in_ptr3 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr4 + (r1 + (16*r2)), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr5 + (r1 + (16*r2)), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr4 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr5 + (4 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp91 = tl.load(in_ptr4 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp93 = tl.load(in_ptr5 + (8 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr4 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr5 + (12 + r1 + (16*r2)), None, eviction_policy='evict_last') tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = tmp2 == tmp2 tmp4 = 1.0 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp1 + tmp6 tmp8 = tmp4 - tmp7 tmp9 = 1e+30 tmp10 = tmp8 * tmp9 tmp11 = tmp0 - tmp10 tmp13 = tmp11 - tmp12 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 - tmp15 tmp17 = tmp16 * tmp1 tmp20 = tl.full([1, 1], 1, tl.int32) tmp21 = tmp20 == tmp2 tmp22 = tl.where(tmp21, tmp4, tmp5) tmp23 = tmp19 + tmp22 tmp24 = tmp4 - tmp23 tmp25 = tmp24 * tmp9 tmp26 = tmp18 - tmp25 tmp28 = tmp26 - tmp27 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tmp32 = tmp31 * tmp19 tmp33 = tmp17 + tmp32 tmp36 = tl.full([1, 1], 2, tl.int32) tmp37 = tmp36 == tmp2 tmp38 = tl.where(tmp37, tmp4, tmp5) tmp39 = tmp35 + tmp38 tmp40 = tmp4 - tmp39 tmp41 = tmp40 * tmp9 tmp42 = tmp34 - tmp41 tmp44 = tmp42 - tmp43 tmp46 = tl_math.log(tmp45) tmp47 = tmp44 - tmp46 tmp48 = tmp47 * tmp35 tmp49 = tmp33 + tmp48 tmp52 = tl.full([1, 1], 3, tl.int32) tmp53 = tmp52 == tmp2 tmp54 = tl.where(tmp53, tmp4, tmp5) tmp55 = tmp51 + tmp54 tmp56 = tmp4 - tmp55 tmp57 = tmp56 * tmp9 tmp58 = tmp50 - tmp57 tmp60 = tmp58 - tmp59 tmp62 = tl_math.log(tmp61) tmp63 = tmp60 - tmp62 tmp64 = tmp63 * tmp51 tmp65 = tmp49 + tmp64 tmp66 = tmp4 - tmp1 tmp67 = tmp4 - tmp66 tmp68 = tmp67 * tmp9 tmp69 = tmp0 - tmp68 tmp71 = tmp69 - tmp70 tmp73 = tl_math.log(tmp72) tmp74 = tmp71 - tmp73 tmp75 = tmp74 * tmp6 tmp76 = tmp4 - tmp19 tmp77 = tmp4 - tmp76 tmp78 = tmp77 * tmp9 tmp79 = tmp18 - tmp78 tmp81 = tmp79 - tmp80 tmp83 = tl_math.log(tmp82) tmp84 = tmp81 - tmp83 tmp85 = tmp84 * tmp22 tmp86 = tmp75 + tmp85 tmp87 = tmp4 - tmp35 tmp88 = 
tmp4 - tmp87 tmp89 = tmp88 * tmp9 tmp90 = tmp34 - tmp89 tmp92 = tmp90 - tmp91 tmp94 = tl_math.log(tmp93) tmp95 = tmp92 - tmp94 tmp96 = tmp95 * tmp38 tmp97 = tmp86 + tmp96 tmp98 = tmp4 - tmp51 tmp99 = tmp4 - tmp98 tmp100 = tmp99 * tmp9 tmp101 = tmp50 - tmp100 tmp103 = tmp101 - tmp102 tmp105 = tl_math.log(tmp104) tmp106 = tmp103 - tmp105 tmp107 = tmp106 * tmp54 tmp108 = tmp97 + tmp107 tmp109 = -tmp65 tmp110 = -tmp108 tmp111 = tmp109 + tmp110 tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp114 = tl.sum(tmp112, 1)[:, None] tmp115 = 64.0 tmp116 = tmp114 / tmp115 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp116, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.fill] stream0 = get_raw_stream(0) triton_poi_fused_fill_lift_fresh_0.run(arg0_1, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, n_mask, sub_3, mul_2, logit2, log_softmax_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax] triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1.run(arg1_1, arg0_1, buf1, buf2, buf4, buf5, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [th_label, setitem, p_mask, sub_1, mul, logit1, log_softmax, mul_1, sum_1, loss1, n_mask, sub_3, mul_2, logit2, log_softmax_1, mul_3, sum_2, loss2, loss, loss_1], Original ATen: [aten.zeros_like, aten.lift_fresh, aten.fill, aten.add, aten.rsub, aten.mul, aten.sub, aten._log_softmax, aten.sum, aten.neg, aten.mean] triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2.run(buf8, arg1_1, arg0_1, buf1, buf2, buf4, buf5, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf1 del buf2 del buf4 del buf5 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor import torch.nn as nn import torch.nn.functional as F class ATLoss(nn.Module): def __init__(self): super().__init__() def forward(self, logits: 'Tensor', labels: 'Tensor') ->Tensor: """ Args: logits: predicted logits (shape: batch size x num classes) labels: one-hot encoded true labels (shape: batch size x num classes) """ th_label = torch.zeros_like(labels, dtype=torch.float) th_label[:, 0] = 1.0 labels[:, 0] = 0.0 p_mask = labels + th_label n_mask = 1 - labels logit1 = logits - (1 - p_mask) * 1e+30 loss1 = -(F.log_softmax(logit1, dim=-1) * labels).sum(1) logit2 = logits - (1 - n_mask) * 1e+30 loss2 = -(F.log_softmax(logit2, dim=-1) * th_label).sum(1) loss = loss1 + loss2 loss = loss.mean() return loss def get_label(self, logits: 'Tensor', num_labels: 'int'=-1, threshold: 'float'=None) ->Tensor: """ Calculates the labels, using the class-0 logit as an adaptive threshold """ if threshold: th_logit = torch.full((len(logits), 1), threshold) else: th_logit = logits[:, 0].unsqueeze(1) output = torch.zeros_like(logits) mask = logits > th_logit if num_labels > 0: top_v, _ = torch.topk(logits, num_labels, dim=1) top_v = top_v[:, -1] mask = (logits >= top_v.unsqueeze(1)) & mask output[mask] = 1.0 output[:, 0] = output.sum(1) == 0.0 return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
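A minimal usage sketch for ATLoss (adaptive-thresholding loss), assuming the class above is in scope; class 0 acts as the threshold class:

import torch

loss_fn = ATLoss()
logits = torch.randn(8, 5)   # [batch, num_classes]
labels = torch.zeros(8, 5)
labels[:, 2] = 1.0           # class 2 is a positive label
# forward() writes labels[:, 0] = 0 in place, so pass a copy if the
# original labels are still needed afterwards.
loss = loss_fn(logits, labels.clone())
print(loss.item())
pred = loss_fn.get_label(logits)  # 1.0 wherever a logit beats the class-0 logit
print(pred.shape)  # torch.Size([8, 5])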
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 tmp0 = 0.0 tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1( in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp28 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = x1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp5 = 1.0 tmp6 = 0.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp1 + tmp7 tmp9 = tmp5 - tmp8 tmp10 = 1e+30 tmp11 = tmp9 * tmp10 tmp12 = tmp0 - tmp11 tmp15 = tmp14 + tmp7 tmp16 = tmp5 - tmp15 tmp17 = tmp16 * tmp10 tmp18 = tmp13 - tmp17 tmp19 = triton_helpers.maximum(tmp12, tmp18) tmp22 = tmp21 + tmp7 tmp23 = tmp5 - tmp22 tmp24 = tmp23 * tmp10 tmp25 = tmp20 - tmp24 tmp26 = triton_helpers.maximum(tmp19, tmp25) tmp29 = tmp28 + tmp7 tmp30 = tmp5 - tmp29 tmp31 = tmp30 * tmp10 tmp32 = tmp27 - tmp31 tmp33 = triton_helpers.maximum(tmp26, tmp32) tmp34 = tmp12 - tmp33 tmp35 = tl_math.exp(tmp34) tmp36 = tmp18 - tmp33 tmp37 = tl_math.exp(tmp36) tmp38 = tmp35 + tmp37 tmp39 = tmp25 - tmp33 tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tmp42 = tmp32 - tmp33 tmp43 = tl_math.exp(tmp42) tmp44 = tmp41 + tmp43 tmp45 = tmp5 - tmp1 tmp46 = tmp5 - tmp45 tmp47 = tmp46 * tmp10 tmp48 = tmp0 - tmp47 tmp49 = tmp5 - tmp14 tmp50 = tmp5 - tmp49 tmp51 = tmp50 * tmp10 tmp52 = tmp13 - tmp51 tmp53 = triton_helpers.maximum(tmp48, tmp52) tmp54 = tmp5 - tmp21 tmp55 = tmp5 - tmp54 tmp56 = tmp55 * tmp10 tmp57 = tmp20 - tmp56 tmp58 = triton_helpers.maximum(tmp53, tmp57) tmp59 = tmp5 - tmp28 tmp60 = tmp5 - tmp59 tmp61 = tmp60 * tmp10 tmp62 = tmp27 - tmp61 tmp63 = triton_helpers.maximum(tmp58, tmp62) tmp64 = tmp48 - tmp63 tmp65 = tl_math.exp(tmp64) tmp66 = tmp52 - tmp63 tmp67 = tl_math.exp(tmp66) tmp68 = tmp65 + tmp67 tmp69 = tmp57 - tmp63 tmp70 = tl_math.exp(tmp69) tmp71 = tmp68 + tmp70 tmp72 = tmp62 - tmp63 tmp73 = tl_math.exp(tmp72) tmp74 = tmp71 + tmp73 tl.store(out_ptr0 + x3, tmp33, xmask) tl.store(out_ptr1 + x3, tmp44, xmask) tl.store(out_ptr2 + x3, tmp63, xmask) tl.store(out_ptr3 + x3, tmp74, xmask) @triton.jit def 
triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 16 r4 = rindex % 16 r1 = rindex // 4 % 4 tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None) tmp1 = tl.load(in_ptr1 + (r4 + 64 * r2), None) tmp12 = tl.load(in_ptr2 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr3 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None) tmp19 = tl.load(in_ptr1 + (16 + r4 + 64 * r2), None) tmp27 = tl.load(in_ptr2 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr3 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None) tmp35 = tl.load(in_ptr1 + (32 + r4 + 64 * r2), None) tmp43 = tl.load(in_ptr2 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp45 = tl.load(in_ptr3 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None) tmp51 = tl.load(in_ptr1 + (48 + r4 + 64 * r2), None) tmp59 = tl.load(in_ptr2 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp61 = tl.load(in_ptr3 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr4 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp72 = tl.load(in_ptr5 + (r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp80 = tl.load(in_ptr4 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp82 = tl.load(in_ptr5 + (4 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp91 = tl.load(in_ptr4 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp93 = tl.load(in_ptr5 + (8 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp102 = tl.load(in_ptr4 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp104 = tl.load(in_ptr5 + (12 + r1 + 16 * r2), None, eviction_policy= 'evict_last') tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = tmp2 == tmp2 tmp4 = 1.0 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp1 + tmp6 tmp8 = tmp4 - tmp7 tmp9 = 1e+30 tmp10 = tmp8 * tmp9 tmp11 = tmp0 - tmp10 tmp13 = tmp11 - tmp12 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 - tmp15 tmp17 = tmp16 * tmp1 tmp20 = tl.full([1, 1], 1, tl.int32) tmp21 = tmp20 == tmp2 tmp22 = tl.where(tmp21, tmp4, tmp5) tmp23 = tmp19 + tmp22 tmp24 = tmp4 - tmp23 tmp25 = tmp24 * tmp9 tmp26 = tmp18 - tmp25 tmp28 = tmp26 - tmp27 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tmp32 = tmp31 * tmp19 tmp33 = tmp17 + tmp32 tmp36 = tl.full([1, 1], 2, tl.int32) tmp37 = tmp36 == tmp2 tmp38 = tl.where(tmp37, tmp4, tmp5) tmp39 = tmp35 + tmp38 tmp40 = tmp4 - tmp39 tmp41 = tmp40 * tmp9 tmp42 = tmp34 - tmp41 tmp44 = tmp42 - tmp43 tmp46 = tl_math.log(tmp45) tmp47 = tmp44 - tmp46 tmp48 = tmp47 * tmp35 tmp49 = tmp33 + tmp48 tmp52 = tl.full([1, 1], 3, tl.int32) tmp53 = tmp52 == tmp2 tmp54 = tl.where(tmp53, tmp4, tmp5) tmp55 = tmp51 + tmp54 tmp56 = tmp4 - tmp55 tmp57 = tmp56 * tmp9 tmp58 = tmp50 - tmp57 tmp60 = tmp58 - tmp59 tmp62 = tl_math.log(tmp61) tmp63 = tmp60 - tmp62 tmp64 = tmp63 * tmp51 tmp65 = tmp49 + tmp64 tmp66 = tmp4 - tmp1 tmp67 = tmp4 - tmp66 tmp68 = tmp67 * tmp9 tmp69 = tmp0 - tmp68 tmp71 = tmp69 - tmp70 tmp73 = tl_math.log(tmp72) tmp74 = tmp71 - tmp73 tmp75 = 
tmp74 * tmp6 tmp76 = tmp4 - tmp19 tmp77 = tmp4 - tmp76 tmp78 = tmp77 * tmp9 tmp79 = tmp18 - tmp78 tmp81 = tmp79 - tmp80 tmp83 = tl_math.log(tmp82) tmp84 = tmp81 - tmp83 tmp85 = tmp84 * tmp22 tmp86 = tmp75 + tmp85 tmp87 = tmp4 - tmp35 tmp88 = tmp4 - tmp87 tmp89 = tmp88 * tmp9 tmp90 = tmp34 - tmp89 tmp92 = tmp90 - tmp91 tmp94 = tl_math.log(tmp93) tmp95 = tmp92 - tmp94 tmp96 = tmp95 * tmp38 tmp97 = tmp86 + tmp96 tmp98 = tmp4 - tmp51 tmp99 = tmp4 - tmp98 tmp100 = tmp99 * tmp9 tmp101 = tmp50 - tmp100 tmp103 = tmp101 - tmp102 tmp105 = tl_math.log(tmp104) tmp106 = tmp103 - tmp105 tmp107 = tmp106 * tmp54 tmp108 = tmp97 + tmp107 tmp109 = -tmp65 tmp110 = -tmp108 tmp111 = tmp109 + tmp110 tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp114 = tl.sum(tmp112, 1)[:, None] tmp115 = 64.0 tmp116 = tmp114 / tmp115 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp116, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_fill_lift_fresh_0[grid(64)](arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__log_softmax_add_fill_lift_fresh_mul_rsub_sub_zeros_like_1[ grid(64)](arg1_1, arg0_1, buf1, buf2, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del buf7 triton_per_fused__log_softmax_add_fill_lift_fresh_mean_mul_neg_rsub_sub_sum_zeros_like_2[ grid(1)](buf8, arg1_1, arg0_1, buf1, buf2, buf4, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf1 del buf2 del buf4 del buf5 return buf8, class ATLossNew(nn.Module): def __init__(self): super().__init__() def get_label(self, logits: 'Tensor', num_labels: 'int'=-1, threshold: 'float'=None) ->Tensor: """ Calculates the labels, using the class-0 logit as an adaptive threshold """ if threshold: th_logit = torch.full((len(logits), 1), threshold) else: th_logit = logits[:, 0].unsqueeze(1) output = torch.zeros_like(logits) mask = logits > th_logit if num_labels > 0: top_v, _ = torch.topk(logits, num_labels, dim=1) top_v = top_v[:, -1] mask = (logits >= top_v.unsqueeze(1)) & mask output[mask] = 1.0 output[:, 0] = output.sum(1) == 0.0 return output def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
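A sketch of an equivalence check against the compiled ATLossNew, assuming a CUDA device. One quirk worth verifying before relying on it: the generated wrapper binds input_0 to arg0_1, and the kernels treat arg0_1 as the labels tensor (it is the one whose [:, 0] slice gets zeroed by the fill kernel), so the argument order here appears to be (labels, logits), the reverse of the eager forward. Both versions mutate the labels argument in place, hence the clones:

import torch

if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')  # call() asserts this shape
    labels = torch.rand(4, 4, 4, 4, device='cuda')
    ref = ATLoss()(logits, labels.clone())
    out = ATLossNew()(labels.clone(), logits)
    torch.testing.assert_close(out, ref)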
techthiyanes/DeepPavlov
ATLoss
false
16,568
[ "Apache-2.0" ]
5,893
08555428388fed3c7b036c0a82a70a25efcabcff
https://github.com/techthiyanes/DeepPavlov/tree/08555428388fed3c7b036c0a82a70a25efcabcff
SpatialDepthWisePerHeadConvolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jb/cjbf3ssum7resbwampiwoknxcnzh4uzdy4fhoaakjojloew6qlw5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [2], [1], False, [0], 16), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6z/c6zyvii2c5e5dc43sgzfkfbsfrhltqlojfzhuoqqgpavg7xdvriv.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [2], [1], False, [0], 16), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 6) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=16, bias=None) assert_size_stride(buf1, (4, 16, 6), (96, 6, 1)) del buf0 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: 
[x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 384, grid=grid(384), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0), primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (16, 1, 64), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SpatialDepthWisePerHeadConvolution(Module): """ ## Spatial Depth Wise Per Head Convolution """ def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3): """ * `heads` is the number of heads * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads) def forward(self, x: 'torch.Tensor'): """ `x` has shape `[seq_len, batch_size, heads, d_k]` """ seq_len, batch_size, heads, d_k = x.shape x = x.permute(1, 2, 3, 0) x = x.view(batch_size, heads * d_k, seq_len) x = self.conv(x) x = x[:, :, :-(self.kernel_size - 1)] x = x.view(batch_size, heads, d_k, seq_len) x = x.permute(3, 0, 1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'heads': 4, 'd_k': 4}]
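A minimal usage sketch, assuming the class above is in scope; the depth-wise 1D convolution mixes information along the sequence axis separately for every (head, channel) pair:

import torch

conv = SpatialDepthWisePerHeadConvolution(heads=4, d_k=4)
x = torch.rand(10, 2, 4, 4)  # [seq_len, batch_size, heads, d_k]
y = conv(x)
assert y.shape == x.shape
# Padding of (kernel_size - 1) followed by truncation of the trailing
# (kernel_size - 1) outputs keeps the convolution causal: position t
# only depends on positions <= t.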
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=16, bias=None) assert_size_stride(buf1, (4, 16, 6), (96, 6, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(384)](buf2, primals_3, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4, 4), (1, 96, 24, 6), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 16, 4), (16, 1, 64), 0) class SpatialDepthWisePerHeadConvolutionNew(Module): """ ## Spatial Depth Wise Per Head Convolution """ def __init__(self, heads: 'int', d_k: 'int', kernel_size: 'int'=3): """ * `heads` is the number of heads * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
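A sketch of an equivalence check for the compiled module above; the two instances are initialized independently, so the conv weights are copied across first. Assumes a CUDA device:

import torch

if torch.cuda.is_available():
    ref = SpatialDepthWisePerHeadConvolution(heads=4, d_k=4).cuda()
    new = SpatialDepthWisePerHeadConvolutionNew(heads=4, d_k=4).cuda()
    new.conv.load_state_dict(ref.conv.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')  # call() asserts this shape
    torch.testing.assert_close(new(x), ref(x))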
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWisePerHeadConvolution
false
16,569
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
Squash
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ym/cym3ccudngmvatoe6w7myo62gop3lu5z7s5oobh4dsa5ywnpodvj.py # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # pow_1 => pow_1 # s2 => sum_1 # sqrt => sqrt # truediv => div # truediv_1 => div_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class Squash(Module): r""" ## Squash This is the **squashing** function from the paper, given by equation $(1)$: $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2} \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$ $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules, whilst $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks the capsules that have a length smaller than one. """ def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, s: 'torch.Tensor'): """ The shape of `s` is `[batch_size, n_capsules, n_features]` """ s2 = (s ** 2).sum(dim=-1, keepdims=True) return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
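A minimal usage sketch, assuming the Squash class above is in scope; squashing rescales each capsule vector to a length in (0, 1) while preserving its direction:

import torch

squash = Squash()
s = torch.randn(2, 8, 16)  # [batch_size, n_capsules, n_features]
v = squash(s)
norms = v.norm(dim=-1)
assert (norms < 1).all()  # ||v|| ~= ||s||^2 / (1 + ||s||^2) < 1
print(norms.min().item(), norms.max().item())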
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SquashNew(Module): r""" ## Squash This is the **squashing** function from the paper, given by equation $(1)$: $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2} \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$ $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules, whilst $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks the capsules that have a length smaller than one. """ def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
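A sketch of an equivalence check between the eager and compiled versions; the fused Triton kernel computes the same formula elementwise, and call() asserts the [4, 4, 4, 4] input shape used at trace time. Assumes a CUDA device:

import torch

if torch.cuda.is_available():
    s = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(SquashNew()(s), Squash()(s))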
techthiyanes/annotated_deep_learning_paper_implementations
Squash
false
16,570
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
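For reference, the fused kernel above can be sanity-checked against a plain eager-mode PyTorch version of the same squashing math. This is a minimal sketch, not part of the record: the helper name `squash` and the test input are illustrative choices.

import torch


def squash(s: torch.Tensor, epsilon: float = 1e-08) -> torch.Tensor:
    # ||s_j||^2 along the feature dimension, kept for broadcasting
    s2 = (s ** 2).sum(dim=-1, keepdim=True)
    # shrink by s2 / (1 + s2), pointing along the (almost) unit direction
    return s2 / (1 + s2) * (s / torch.sqrt(s2 + epsilon))


s = torch.rand(4, 4, 4, 4)
v = squash(s)
# every capsule ends up with length strictly below one
assert (v.norm(dim=-1) < 1).all()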
SpacialGatingUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/m6/cm6bwhdk6ccdc7sc4qacfvqqjzmregk7iugudvoesmhwtekpv57y.py # Topologically Sorted Source Nodes: [z2_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # z2_1 => clone, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_1,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tp/ctprz4mscsdm7l4jvnnrdw6hhotjnj3e7dfnm67popopmu3ntjay.py # Topologically Sorted Source Nodes: [z2_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # z2_1 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_1,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_3), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/42/c42m6aelphl7hlr7hwg5usoio6dqarsont3w6ceadrk4ycaves4f.py # Topologically Sorted Source Nodes: [z2_2, mul], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # mul => mul_2 # z2_2 => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %unsqueeze_4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %add_2), kwargs = {}) triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x3 = (xindex // 2) x4 = xindex x2 = (xindex // 8) tmp0 = tl.load(in_ptr0 + (x0 + (4*x3)), xmask) tmp1 = tl.load(in_out_ptr0 + (x4), xmask) tmp2 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tl.store(in_out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (2, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [z2_1], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [z2_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, primals_2, primals_3, buf1, buf2, 32, grid=grid(32), stream=stream0) del buf0 del primals_2 del primals_3 buf3 = empty_strided_cuda((1, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (1, 4, 8), (0, 8, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 2), (8, 2, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [z2_2, mul], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_2.run(buf4, primals_1, primals_5, 32, grid=grid(32), stream=stream0) del primals_5 return (buf4, reinterpret_tensor(primals_1, (4, 4, 2), (16, 4, 1), 0), buf1, reinterpret_tensor(primals_4, (1, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (1, 8, 4), (32, 1, 8), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.utils.data
from typing import Optional
import torch.nn.functional
import torch.autograd


class SpacialGatingUnit(nn.Module):
    """
    ## Spatial Gating Unit

    $$s(Z) = Z_1 \\odot f_{W,b}(Z_2)$$

    where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the sequence dimension,
    and $\\odot$ is element-wise multiplication.
    $Z$ is split into two parts of equal size $Z_1$ and $Z_2$
    along the channel dimension (embedding dimension).
    """

    def __init__(self, d_z: 'int', seq_len: 'int'):
        """
        * `d_z` is the dimensionality of $Z$
        * `seq_len` is the sequence length
        """
        super().__init__()
        self.norm = nn.LayerNorm([d_z // 2])
        self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(
            -0.01, 0.01), requires_grad=True)
        self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True)

    def forward(self, z: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None):
        """
        * `z` is the input $Z$ of shape `[seq_len, batch_size, d_z]`
        * `mask` is a boolean mask of shape `[seq_len, seq_len, 1]` that controls
        the visibility of tokens among each other. The last dimension of size `1`
        is the batch dimension, which other transformer implementations have and
        which is kept here for compatibility.
        """
        seq_len = z.shape[0]
        z1, z2 = torch.chunk(z, 2, dim=-1)
        if mask is not None:
            assert mask.shape[0] == 1 or mask.shape[0] == seq_len
            assert mask.shape[1] == seq_len
            assert mask.shape[2] == 1
            mask = mask[:, :, 0]
        z2 = self.norm(z2)
        weight = self.weight[:seq_len, :seq_len]
        if mask is not None:
            weight = weight * mask
        z2 = torch.einsum('ij,jbd->ibd', weight, z2) + self.bias[:seq_len,
            None, None]
        return z1 * z2


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_z': 4, 'seq_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x3 = xindex // 2 x4 = xindex x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x3), xmask) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(32)](primals_1, buf0, primals_2, primals_3, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf0 del primals_2 del primals_3 buf3 = empty_strided_cuda((1, 4, 8), (32, 8, 1), 
torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_4, (1, 4, 4), (16, 4,
            1), 0), reinterpret_tensor(buf2, (1, 4, 8), (0, 8, 1), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 2), (8, 2, 1), 0)
        del buf3
        triton_poi_fused_add_mul_2[grid(32)](buf4, primals_1, primals_5, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_5
    return buf4, reinterpret_tensor(primals_1, (4, 4, 2), (16, 4, 1), 0
        ), buf1, reinterpret_tensor(primals_4, (1, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(buf2, (1, 8, 4), (32, 1, 8), 0)


class SpacialGatingUnitNew(nn.Module):
    """
    ## Spatial Gating Unit

    $$s(Z) = Z_1 \\odot f_{W,b}(Z_2)$$

    where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the sequence dimension,
    and $\\odot$ is element-wise multiplication.
    $Z$ is split into two parts of equal size $Z_1$ and $Z_2$
    along the channel dimension (embedding dimension).
    """

    def __init__(self, d_z: 'int', seq_len: 'int'):
        """
        * `d_z` is the dimensionality of $Z$
        * `seq_len` is the sequence length
        """
        super().__init__()
        self.norm = nn.LayerNorm([d_z // 2])
        self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(
            -0.01, 0.01), requires_grad=True)
        self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True)

    def forward(self, input_0):
        primals_4 = self.weight
        primals_5 = self.bias
        primals_2 = self.norm.weight
        primals_3 = self.norm.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpacialGatingUnit
false
16,571
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
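The gating computation compiled above reads more clearly in a few lines of eager PyTorch. A minimal sketch, assuming the toy shapes from `get_init_inputs` (seq_len = d_z = 4) and no mask; the variable names are illustrative.

import torch
from torch import nn

seq_len, batch_size, d_z = 4, 4, 4
z = torch.rand(seq_len, batch_size, d_z)
norm = nn.LayerNorm([d_z // 2])
weight = torch.zeros(seq_len, seq_len).uniform_(-0.01, 0.01)
bias = torch.ones(seq_len)

# split Z along the channel dimension: Z1 gates, Z2 gets transformed
z1, z2 = torch.chunk(z, 2, dim=-1)
z2 = norm(z2)
# mix Z2 along the *sequence* dimension with W, then add the per-position bias
z2 = torch.einsum('ij,jbd->ibd', weight, z2) + bias[:, None, None]
out = z1 * z2
# the output keeps only half the channels of the input
assert out.shape == (seq_len, batch_size, d_z // 2)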
SpatialDepthWiseConvolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6j/c6jwd535crsaxcf2vn6fo7tivf6rewo3kc7wmj2smnibmmu6vbwp.py # Topologically Sorted Source Nodes: [res, mul_1, iadd, mul_2, iadd_1], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # iadd => add # iadd_1 => add_1 # mul_1 => mul_1 # mul_2 => mul_2 # res => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_2, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_1, %mul_1), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%mul, %add, 0, 1, 9223372036854775807), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_3, 0, 1, 9223372036854775807), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_10, %view_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_11, %mul_2), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %add_1, 0, 2, 9223372036854775807), kwargs = {}) # %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %slice_12, 0, 2, 9223372036854775807), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 64) x4 = xindex x0 = xindex % 4 tmp61 = tl.load(in_ptr0 + (x4), xmask) tmp62 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 >= tmp3 tmp5 = tmp4 & tmp2 tmp6 = tmp4 & tmp5 tmp7 = tl.load(in_ptr0 + (x4), tmp6 & xmask, other=0.0) tmp8 = tl.load(in_ptr1 + (x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + ((-64) + x4), tmp6 & xmask, other=0.0) tmp11 = tl.load(in_ptr1 + (4 + x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.load(in_ptr0 + (x4), tmp5 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + (x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.where(tmp4, tmp15, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp5, tmp19, tmp20) tmp22 = tl.load(in_ptr0 + ((-64) + x4), tmp5 & xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (4 + x0), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp5, tmp25, tmp26) tmp28 = tl.load(in_ptr0 + (x4), tmp2 & xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (x0), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tl.where(tmp4, tmp27, tmp30) tmp32 = tl.where(tmp4, tmp21, tmp31) tmp33 = tl.load(in_ptr0 + ((-128) + x4), tmp2 & xmask, other=0.0) tmp34 = tl.load(in_ptr1 + (8 + x0), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp4 & tmp4 tmp40 = tl.load(in_ptr0 + (x4), tmp39 & xmask, other=0.0) tmp41 = tl.load(in_ptr1 + (x0), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tmp40 * tmp41 tmp43 = tl.load(in_ptr0 + ((-64) + x4), tmp39 & xmask, other=0.0) tmp44 = tl.load(in_ptr1 + (4 + x0), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp45 = tmp43 * tmp44 tmp46 = tmp42 + tmp45 tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.load(in_ptr0 + (x4), tmp4 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) 
tmp51 = tmp49 * tmp50 tmp52 = tl.where(tmp4, tmp48, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp4, tmp52, tmp53) tmp55 = tl.load(in_ptr0 + ((-64) + x4), tmp4 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (4 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp55 * tmp56 tmp58 = tmp51 + tmp57 tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype) tmp60 = tl.where(tmp4, tmp58, tmp59) tmp63 = tmp61 * tmp62 tmp64 = tl.where(tmp4, tmp60, tmp63) tmp65 = tl.where(tmp4, tmp54, tmp64) tmp66 = tl.where(tmp2, tmp38, tmp65) tmp67 = tl.where(tmp2, tmp66, tmp66) tl.store(in_out_ptr0 + (x4), tmp67, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (3, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [res, mul_1, iadd, mul_2, iadd_1], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(buf1, primals_2, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 return (buf1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import math import torch from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd class SpatialDepthWiseConvolution(Module): """ ## Spatial Depth Wise Convolution This is actually slower """ def __init__(self, d_k: 'int', kernel_size: 'int'=3): """ * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size rng = 1 / math.sqrt(kernel_size) self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)). uniform_(-rng, rng)) def forward(self, x: 'torch.Tensor'): """ `x` has shape `[seq_len, batch_size, heads, d_k]` """ res = x * self.kernels[0].view(1, 1, 1, -1) for i in range(1, len(self.kernels)): res[i:] += x[:-i] * self.kernels[i].view(1, 1, 1, -1) return res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_k': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex x0 = xindex % 4 tmp61 = tl.load(in_ptr0 + x4, xmask) tmp62 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 >= tmp3 tmp5 = tmp4 & tmp2 tmp6 = tmp4 & tmp5 tmp7 = tl.load(in_ptr0 + x4, tmp6 & xmask, other=0.0) tmp8 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (-64 + x4), tmp6 & xmask, other=0.0) tmp11 = tl.load(in_ptr1 + (4 + x0), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.load(in_ptr0 + x4, tmp5 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x0, tmp5 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.where(tmp4, tmp15, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp5, tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (-64 + x4), tmp5 & xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (4 + x0), tmp5 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tmp18 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp5, tmp25, tmp26) tmp28 = tl.load(in_ptr0 + x4, tmp2 & xmask, other=0.0) tmp29 = tl.load(in_ptr1 + x0, tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tl.where(tmp4, tmp27, tmp30) tmp32 = tl.where(tmp4, tmp21, tmp31) tmp33 = tl.load(in_ptr0 + (-128 + x4), tmp2 & xmask, other=0.0) tmp34 = tl.load(in_ptr1 + (8 + x0), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp4 & tmp4 tmp40 = tl.load(in_ptr0 + x4, tmp39 & xmask, other=0.0) tmp41 = tl.load(in_ptr1 + x0, tmp39 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tmp40 * tmp41 tmp43 = tl.load(in_ptr0 + (-64 + x4), tmp39 & xmask, other=0.0) tmp44 = tl.load(in_ptr1 + (4 + x0), tmp39 & xmask, eviction_policy= 'evict_last', other=0.0) tmp45 = tmp43 * tmp44 tmp46 = tmp42 + tmp45 tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp39, tmp46, tmp47) tmp49 = tl.load(in_ptr0 + x4, tmp4 & xmask, other=0.0) tmp50 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp51 = tmp49 * tmp50 tmp52 = tl.where(tmp4, tmp48, tmp51) tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp4, tmp52, tmp53) tmp55 = tl.load(in_ptr0 + (-64 + x4), tmp4 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tmp55 * tmp56 tmp58 = tmp51 + tmp57 tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype) tmp60 = tl.where(tmp4, tmp58, tmp59) tmp63 = tmp61 * tmp62 
tmp64 = tl.where(tmp4, tmp60, tmp63) tmp65 = tl.where(tmp4, tmp54, tmp64) tmp66 = tl.where(tmp2, tmp38, tmp65) tmp67 = tl.where(tmp2, tmp66, tmp66) tl.store(in_out_ptr0 + x4, tmp67, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (3, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_2, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf1, primals_2 class SpatialDepthWiseConvolutionNew(Module): """ ## Spatial Depth Wise Convolution This is actually slower """ def __init__(self, d_k: 'int', kernel_size: 'int'=3): """ * `d_k` is the number of channels in each head """ super().__init__() self.kernel_size = kernel_size rng = 1 / math.sqrt(kernel_size) self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)). uniform_(-rng, rng)) def forward(self, input_0): primals_1 = self.kernels primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
SpatialDepthWiseConvolution
false
16,572
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
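The loop in `forward` is a causal depth-wise convolution written as shifted adds: `kernels[i]` scales the input `i` steps back along the sequence. A minimal sketch under the record's toy shapes, illustrating that position `t` never sees the future:

import math
import torch

seq_len, batch_size, heads, d_k, kernel_size = 4, 4, 4, 4, 3
x = torch.rand(seq_len, batch_size, heads, d_k)
rng = 1 / math.sqrt(kernel_size)
kernels = torch.zeros(kernel_size, d_k).uniform_(-rng, rng)

# res[t] = sum_i kernels[i] * x[t - i], with out-of-range terms dropped
res = x * kernels[0].view(1, 1, 1, -1)
for i in range(1, kernel_size):
    res[i:] += x[:-i] * kernels[i].view(1, 1, 1, -1)

# the first position only sees itself, so the convolution is causal
assert torch.allclose(res[0], x[0] * kernels[0])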
PatchEmbeddings
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class PatchEmbeddings(Module):
    """
    <a id="PatchEmbeddings"></a>

    ## Get patch embeddings

    The paper splits the image into patches of equal size and does a linear
    transformation on the flattened pixels of each patch.

    We implement the same thing through a convolution layer, because it's
    simpler to implement.
    """

    def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'):
        """
        * `d_model` is the transformer embeddings size
        * `patch_size` is the size of the patch
        * `in_channels` is the number of channels in the input image (3 for RGB)
        """
        super().__init__()
        self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=
            patch_size)

    def forward(self, x: 'torch.Tensor'):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        x = self.conv(x)
        bs, c, h, w = x.shape
        x = x.permute(2, 3, 0, 1)
        x = x.view(h * w, bs, c)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'patch_size': 4, 'in_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
            4), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0
        ), primals_1, primals_3


class PatchEmbeddingsNew(Module):
    """
    <a id="PatchEmbeddings"></a>

    ## Get patch embeddings

    The paper splits the image into patches of equal size and does a linear
    transformation on the flattened pixels of each patch.

    We implement the same thing through a convolution layer, because it's
    simpler to implement.
    """

    def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'):
        """
        * `d_model` is the transformer embeddings size
        * `patch_size` is the size of the patch
        * `in_channels` is the number of channels in the input image (3 for RGB)
        """
        super().__init__()
        self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=
            patch_size)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
PatchEmbeddings
false
16,573
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
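Because the kernel size equals the stride, the convolution reads each patch exactly once, which is why it is equivalent to flattening patches and applying a shared linear map. A minimal sketch of the shape bookkeeping (using `reshape` rather than `view`, since the permuted tensor is not contiguous in general):

import torch
from torch import nn

d_model, patch_size, in_channels = 4, 4, 4
conv = nn.Conv2d(in_channels, d_model, patch_size, stride=patch_size)

image = torch.rand(4, in_channels, 4, 4)  # [batch_size, channels, height, width]
x = conv(image)                           # [batch_size, d_model, h / p, w / p]
bs, c, h, w = x.shape
# one embedding per patch, sequence-first as the transformer expects
x = x.permute(2, 3, 0, 1).reshape(h * w, bs, c)
assert x.shape == (h * w, 4, d_model)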
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t6/ct6tjcg37hwxssa3rmolxu36szytdluhb36zohxf24euvezqpnz3.py # Topologically 
Sorted Source Nodes: [weights_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # weights_1 => mul_2 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %unsqueeze_2), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ce/ccebvj5ndujasz3ies2kagzmfbbfis526scj32d65l5wlmbl3ve2.py # Topologically Sorted Source Nodes: [add, leaky_relu], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add => add # leaky_relu => gt, mul_3, where # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %unsqueeze_6), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul_3), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [style], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_5, buf1, buf2, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 12, 4, 4), (192, 16, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf3 # 
reuse buf5 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, leaky_relu], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_2.run(buf4, primals_6, buf5, 192, grid=grid(192), stream=stream0) del primals_6 return (buf4, primals_3, primals_5, buf1, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from typing import List
import torch.nn.functional
import torch.autograd


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.

    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this
    doesn't have an effect since optimizers such as Adam normalize them by a
    running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$.
    But the effective weights $w$ get updated proportionately to $c \\lambda$.
    Without equalized learning rate, the effective weights will get updated
    proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and
    demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int',
        kernel_size: 'int', demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by
        their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features,
            kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is the style-based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4),
                keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class ToRGB(nn.Module):
    """
    <a id="to_rgb"></a>

    ### To RGB

    ![To RGB](to_rgb.svg)

    ---*$A$ denotes a linear layer.*---

    Generates an RGB image from a feature map using a $1 \\times 1$ convolution.
    """

    def __init__(self, d_latent: 'int', features: 'int'):
        """
        * `d_latent` is the dimensionality of $w$
        * `features` is the number of features in the feature map
        """
        super().__init__()
        self.to_style = EqualizedLinear(d_latent, features, bias=1.0)
        self.conv = Conv2dWeightModulate(features, 3, kernel_size=1,
            demodulate=False)
        self.bias = nn.Parameter(torch.zeros(3))
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        """
        style = self.to_style(w)
        x = self.conv(x, style)
        return self.activation(x + self.bias[None, :, None, None])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_latent': 4, 'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
            float32)
        triton_poi_fused_mul_1[grid(48)](primals_5, buf1, buf2, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
            16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12,
            4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=4, bias=None)
        assert_size_stride(buf3, (1, 12, 4, 4), (192, 16, 4, 1))
        buf4 = reinterpret_tensor(buf3, (4, 3, 4, 4), (48, 16, 4, 1), 0)
        del buf3
        buf5 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_leaky_relu_backward_2[grid(192)](buf4,
            primals_6, buf5, 192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
    return buf4, primals_3, primals_5, buf1, reinterpret_tensor(primals_4,
        (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12,
        4, 1, 1), (4, 1, 1, 1), 0), buf5


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this
    doesn't have an effect since optimizers such as Adam normalize them by a
    running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate
    $\\lambda$. But the effective weights $w$ get updated proportionately to
    $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and
    demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int',
        kernel_size: 'int', demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by
          their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features,
            kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is the style-based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4),
                keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class ToRGBNew(nn.Module):
    """
    <a id="to_rgb"></a>

    ### To RGB

    ![To RGB](to_rgb.svg)

    ---*$A$ denotes a linear layer.*---

    Generates an RGB image from a feature map using $1 \\times 1$ convolution.
    """

    def __init__(self, d_latent: 'int', features: 'int'):
        """
        * `d_latent` is the dimensionality of $w$
        * `features` is the number of features in the feature map
        """
        super().__init__()
        self.to_style = EqualizedLinear(d_latent, features, bias=1.0)
        self.conv = Conv2dWeightModulate(features, 3, kernel_size=1,
            demodulate=False)
        self.bias = nn.Parameter(torch.zeros(3))
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, input_0, input_1):
        primals_6 = self.bias
        primals_2 = self.to_style.bias
        primals_1 = self.to_style.weight.weight
        primals_5 = self.conv.weight.weight
        primals_4 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
ToRGB
false
16,574
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yn/cynbldmr7p3iqlxaz2m4g3wb7mwkas4tipve2qeoi5wjsayi7bng.py # Topologically Sorted Source Nodes: [mul, intersection, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => sum_1 # mul => mul # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3k/c3kv2daba65ym5jpbiaw4bhf5crwsemu3nswcg3rtinfrderhiza.py # Topologically Sorted Source Nodes: [mul_1, add, add_1, add_2, truediv, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # loss => sub # loss_1 => mean # mul_1 => mul_1 # truediv => div # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tmp3 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, ), (1, ), torch.float32) buf1 = empty_strided_cuda((16, ), (1, ), torch.float32) buf2 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul, intersection, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_1, add, add_1, add_2, truediv, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] triton_per_fused_add_div_mean_mul_rsub_1.run(buf4, buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.hub def dice_loss(input, target): smooth = 1.0 input = torch.sigmoid(input) if input.dim() == 4: B, C, _H, _W = input.size() iflat = input.view(B * C, -1) tflat = target.view(B * C, -1) else: assert input.dim() == 3 B, _H, _W = input.size() iflat = input.view(B, -1) tflat = target.view(B, -1) intersection = (iflat * tflat).sum(dim=1) loss = 1 - (2.0 * intersection + smooth) / (iflat.sum(dim=1) + tflat. sum(dim=1) + smooth) loss = loss.mean() return loss class DiceLoss(nn.Module): def __init__(self): super().__init__() pass def forward(self, input, target): return dice_loss(input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
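The loss formula is easy to verify by hand on a tiny map; a sketch with illustrative values, using the fact that sigmoid(0) = 0.5 everywhere:

import torch

# Hand check on a single 1x1x2x2 map: intersection = 4 * 0.5 = 2,
# iflat.sum() = 2, tflat.sum() = 4, smooth = 1,
# so loss = 1 - (2 * 2 + 1) / (2 + 4 + 1) = 1 - 5 / 7.
logits = torch.zeros(1, 1, 2, 2)
target = torch.ones(1, 1, 2, 2)
print(torch.isclose(dice_loss(logits, target), torch.tensor(1 - 5.0 / 7.0)))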
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tmp3 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) buf1 = empty_strided_cuda((16,), (1,), torch.float32) buf2 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(16)](arg0_1, arg1_1, buf0, buf1, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, def dice_loss(input, target): smooth = 1.0 input = torch.sigmoid(input) if input.dim() == 4: B, C, _H, _W = input.size() iflat = input.view(B * C, -1) tflat = target.view(B * C, -1) else: assert input.dim() == 3 B, _H, _W = input.size() iflat = input.view(B, -1) tflat = target.view(B, -1) intersection = (iflat * tflat).sum(dim=1) loss = 1 - (2.0 * intersection + smooth) / (iflat.sum(dim=1) + tflat. 
sum(dim=1) + smooth) loss = loss.mean() return loss class DiceLossNew(nn.Module): def __init__(self): super().__init__() pass def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
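A plausible way to exercise the compiled wrapper against the eager module (a sketch assuming a CUDA device; the tolerance is a guess for fp32 reduction-order differences):

import torch

torch.manual_seed(0)
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
# Both paths sigmoid `a`, reduce per (B*C) row, and average; expect True.
print(torch.allclose(DiceLoss()(a, b), DiceLossNew()(a, b), atol=1e-06))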
thangnx183/kaggle-understanding-clouds
DiceLoss
false
16,575
[ "BSD-2-Clause" ]
207
15ad2a9029958262437b899cb00525579da23911
https://github.com/thangnx183/kaggle-understanding-clouds/tree/15ad2a9029958262437b899cb00525579da23911
StyleBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wz/cwznwckxjekzcxlkjcgq2jhezeju57iivq5nhdjaezb3fvcxcau2.py # Topologically 
Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # sigma_inv => rsqrt # sum_1 => sum_1 # weights_1 => mul_2 # weights_2 => mul_3 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %unsqueeze_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %rsqrt), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_1 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = (rindex // 9) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (36*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() 
tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (36*x4)), tmp13, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ra/cray4k4us3bbwyahj4pt3p7tsuiw557n7abg5krmhhniiqoibtti.py # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, leaky_relu], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_2 # leaky_relu => gt, mul_5, where # mul_4 => mul_4 # x_3 => add_1 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_6, %primals_6), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %mul_4), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %unsqueeze_9), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_5), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 
tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + (x3), tmp12, xmask) tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [s], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf0 # reuse buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_1.run(buf3, primals_5, buf1, buf4, 16, 36, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, leaky_relu], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2.run(buf6, primals_7, primals_6, primals_8, buf7, 256, grid=grid(256), stream=stream0) del primals_7 del primals_8 return (buf6, primals_3, primals_5, primals_6, buf1, buf3, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 
1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from typing import Optional
from typing import List
import torch.nn.functional
import torch.autograd


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this
    doesn't have an effect since optimizers such as Adam normalize them by a
    running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate
    $\\lambda$. But the effective weights $w$ get updated proportionately to
    $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and
    demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int',
        kernel_size: 'int', demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by
          their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features,
            kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is the style-based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4),
                keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class StyleBlock(nn.Module):
    """
    <a id="style_block"></a>

    ### Style Block

    ![Style block](style_block.svg)

    ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling
    operation (noise is a single channel).*---

    Style block has a weight modulation convolution layer.
    """

    def __init__(self, d_latent: 'int', in_features: 'int', out_features:
        'int'):
        """
        * `d_latent` is the dimensionality of $w$
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        """
        super().__init__()
        self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0)
        self.conv = Conv2dWeightModulate(in_features, out_features,
            kernel_size=3)
        self.scale_noise = nn.Parameter(torch.zeros(1))
        self.bias = nn.Parameter(torch.zeros(out_features))
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise:
        'Optional[torch.Tensor]'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        * `noise` is a tensor of shape `[batch_size, 1, height, width]`
        """
        s = self.to_style(w)
        x = self.conv(x, s)
        if noise is not None:
            x = x + self.scale_noise[None, :, None, None] * noise
        return self.activation(x + self.bias[None, :, None, None])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_latent': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = rindex // 9 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 36 * x0), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 36 * x4), tmp13, rmask & xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, 
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0)
        del buf0
        buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0)
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1),
            torch.float32)
        triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf3, primals_5,
            buf1, buf4, 16, 36, XBLOCK=8, num_warps=4, num_stages=1)
        buf5 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
            16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16,
            4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=4, bias=None)
        assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1))
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2[grid(256)](
            buf6, primals_7, primals_6, primals_8, buf7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
        del primals_8
    return (buf6, primals_3, primals_5, primals_6, buf1, buf3,
        reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), buf7)


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this
    doesn't have an effect since optimizers such as Adam normalize them by a
    running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate
    $\\lambda$. But the effective weights $w$ get updated proportionately to
    $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and
    demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int',
        kernel_size: 'int', demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by
          their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features,
            kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is the style-based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4),
                keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class StyleBlockNew(nn.Module):
    """
    <a id="style_block"></a>

    ### Style Block

    ![Style block](style_block.svg)

    ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling
    operation (noise is a single channel).*---

    Style block has a weight modulation convolution layer.
    """

    def __init__(self, d_latent: 'int', in_features: 'int', out_features:
        'int'):
        """
        * `d_latent` is the dimensionality of $w$
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        """
        super().__init__()
        self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0)
        self.conv = Conv2dWeightModulate(in_features, out_features,
            kernel_size=3)
        self.scale_noise = nn.Parameter(torch.zeros(1))
        self.bias = nn.Parameter(torch.zeros(out_features))
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, input_0, input_1, input_2):
        primals_7 = self.scale_noise
        primals_2 = self.bias
        primals_8 = self.to_style.bias
        primals_1 = self.to_style.weight.weight
        primals_5 = self.conv.weight.weight
        primals_4 = input_0
        primals_3 = input_1
        primals_6 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
StyleBlock
false
16,576
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
AddTensors
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3r/c3rcleexr5nwq4qvwylgjpelwulrq7jjyvy54eszp24wxfm6tszs.py # Topologically Sorted Source Nodes: [add, add_1, add_2, add_3], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, 0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %select_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %select_3), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, 
xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0), xmask) tmp5 = tl.load(in_ptr0 + (128 + x0), xmask) tmp7 = tl.load(in_ptr0 + (192 + x0), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, add_2, add_3], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
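The fused kernel above is worth a gloss: Inductor has folded the graph's chain add -> add_1 -> add_2 -> add_3 into one elementwise pass, and because the (4, 4, 4, 4) input is contiguous with strides (64, 16, 4, 1), the loads at offsets 0, 64, 128 and 192 are exactly the slices x[0] through x[3]. In other words, the kernel computes x.sum(dim=0) in a single trip through memory. A minimal eager-mode sketch of the same reduction (illustration only, not part of the generated module):

import torch

x = torch.rand(4, 4, 4, 4)
# sum(xs) over a 4-D tensor iterates dim 0, so AddTensors reduces the
# leading dimension: (4, 4, 4, 4) -> (4, 4, 4).
manual = x[0] + x[1] + x[2] + x[3]
assert torch.equal(manual, x.sum(dim=0))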
import torch
import torch.nn as nn
import torch.hub


class AddTensors(nn.Module):
    """ Adds all its inputs together. """

    def forward(self, xs):
        return sum(xs)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.hub

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # The four loads, 64 elements apart, are the dim-0 slices of the
    # contiguous (4, 4, 4, 4) input.
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
    tmp5 = tl.load(in_ptr0 + (128 + x0), xmask)
    tmp7 = tl.load(in_ptr0 + (192 + x0), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class AddTensorsNew(nn.Module):
    """ Adds all its inputs together. """

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
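A quick way to sanity-check the cleaned kernel against the eager module; a minimal sketch assuming a CUDA device and the AddTensors/AddTensorsNew definitions above:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = AddTensors()(x)      # eager path: Python sum over dim 0
    out = AddTensorsNew()(x)   # compiled path: one fused Triton kernel
    assert out.shape == (4, 4, 4)
    torch.testing.assert_close(out, ref)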
theoway/raster-vision
AddTensors
false
16,577
[ "Apache-2.0" ]
1,577
dab675517f904771e2ce8c052494f8a6f1ddc026
https://github.com/theoway/raster-vision/tree/dab675517f904771e2ce8c052494f8a6f1ddc026
ACGANDiscriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/n3/cn3pwfiylc5cth7jk5f77jeway5lcoukpfnrppapdazr3hrkmrrq.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ov/covqd7o7g6fvrjwyuxsobtroi2lyqqwvw5dfkitsf3alpk5tc4qp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ba/cbayuw2by4w6xwduhs5qdriinmydiep6bpw7fyi37s377up7lrcm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p4/cp4wei76b6qz5pfcuwd2x5aoj7ndozsklvusxdkgxnnrk6js5dww.py # Topologically Sorted Source Nodes: [add, add_1, add_2, output], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # output => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_12), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_16), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {}) triton_poi_fused_add_div_3 = async_compile.triton('triton_poi_fused_add_div_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 2 x2 = (xindex // 6) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) 
+ (24*x2)), xmask) tmp1 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (24*x2)), xmask) tmp3 = tl.load(in_ptr0 + (3 + x0 + (6*x1) + (24*x2)), xmask) tmp5 = tl.load(in_ptr0 + (15 + x0 + (6*x1) + (24*x2)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pl/cpl772kzk2untbywfegzyzg7aminsnqthl2mysgs73mlh244x4zg.py # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # output_2 => convolution_1 # output_3 => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rl/crlq37bavp6co3onxc4bvk2y6jotsfh5uv6o2wo6pnims7pb6tee.py # Topologically Sorted Source Nodes: [output_1, add_3, add_4, add_5, output_5, output_6, output_9], Original ATen: [aten.convolution, aten.add, aten.div, aten.relu] # Source node to ATen node mapping: # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # output_1 => convolution # output_5 => div_1 # output_6 => add_6 # output_9 => relu_1 # Graph fragment: # %convolution : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_20, %slice_24), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %slice_28), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %slice_32), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_5, 4), kwargs = {}) # %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %div_1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_6,), kwargs = {}) triton_poi_fused_add_convolution_div_relu_5 = async_compile.triton('triton_poi_fused_add_convolution_div_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_div_relu_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x1 = (xindex // 128) % 2 x2 = (xindex // 256) tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (256*x1) + (1024*x2)), None) tmp4 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (512 + x0 + (256*x1) + (1024*x2)), None) tmp9 = tl.load(in_ptr1 + (128 + x0 + (256*x1) + (1024*x2)), None) tmp12 = tl.load(in_ptr1 + (640 + x0 + (256*x1) + (1024*x2)), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(in_out_ptr0 + (x3), tmp17, None) tl.store(out_ptr0 + 
(x3), tmp19, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3t/c3tqr5dg2huh5t7vqxogki255lx3taiv6rgufakqwbt7jujz6pxs.py # Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # output_10 => convolution_4 # output_11 => relu_2 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gn/cgnp54wk2tj3nwsovfq7bzvyzskl2pmvcf567vjoeqnqpwigkh3l.py # Topologically Sorted Source Nodes: [add_7, add_8, add_9, output_8, add_10, add_11, add_12, output_13, output_14, output_16], Original ATen: [aten.add, aten.div, aten.relu] # Source node to ATen node mapping: # add_10 => add_10 # add_11 => add_11 # add_12 => add_12 # add_7 => add_7 # add_8 => add_8 # add_9 => add_9 # output_13 => div_3 # output_14 => add_13 # output_16 => relu_3 # output_8 => div_2 # Graph fragment: # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_36, %slice_40), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %slice_44), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%add_8, %slice_48), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_9, 4), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_52, %slice_56), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %slice_60), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %slice_64), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, 4), kwargs = {}) # %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, %div_3), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_13,), kwargs = {}) triton_poi_fused_add_div_relu_7 = async_compile.triton('triton_poi_fused_add_div_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (256 + x0 + (512*x1)), xmask) tmp6 = tl.load(in_ptr0 + (128 + x0 + (512*x1)), xmask) tmp9 = tl.load(in_ptr0 + (384 + x0 + (512*x1)), xmask) tmp14 = tl.load(in_ptr2 + (x0 + (512*x1)), xmask) tmp15 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (256 + x0 + (512*x1)), xmask) tmp20 = tl.load(in_ptr2 + (128 + x0 + (512*x1)), xmask) tmp23 = tl.load(in_ptr2 + (384 + x0 + (512*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tmp16 = tmp14 + tmp15 tmp18 = tmp17 + tmp15 tmp19 = tmp16 + tmp18 tmp21 = tmp20 + tmp15 tmp22 = tmp19 + tmp21 tmp24 = tmp23 + tmp15 tmp25 = 
tmp22 + tmp24 tmp26 = tmp25 * tmp12 tmp27 = tmp13 + tmp26 tmp28 = tl.full([1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr0 + (x2), tmp27, xmask) tl.store(out_ptr1 + (x2), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ju/cjug5s7yxvqegp25wrcwctyzzf4nzvn5u4j2c2c2ssbfavokgljt.py # Topologically Sorted Source Nodes: [output_17, output_18], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # output_17 => convolution_6 # output_18 => relu_4 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/eb/cebdtxa2fysqjh5ltamrku7rghltuysq5yrxppjy3uhb2l3dia4u.py # Topologically Sorted Source Nodes: [output_19, output_20, output_22], Original ATen: [aten.convolution, aten.add, aten.relu] # Source node to ATen node mapping: # output_19 => convolution_7 # output_20 => add_14 # output_22 => relu_5 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %convolution_7), 
kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_14,), kwargs = {}) triton_poi_fused_add_convolution_relu_9 = async_compile.triton('triton_poi_fused_add_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f4/cf4t43lvbp3onfgc6ycqi5drwnxpbwjrxhfple54ghh72fvcvf7u.py # Topologically Sorted Source Nodes: [output_19, output_20, output_25, output_26, output_28, mean, out_feat, output_29], Original ATen: [aten.convolution, aten.add, aten.relu, aten.mean, aten.threshold_backward] # Source node to ATen node mapping: # mean => mean # out_feat => mean_1 # output_19 => convolution_7 # output_20 => add_14 # output_25 => convolution_9 # output_26 => add_15 # output_28 => relu_7 # output_29 => mean_3 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %convolution_7), kwargs = {}) # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_14, %convolution_9), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_15,), kwargs = {}) # %mean : 
[num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_7, [3]), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {}) # %mean_3 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {}) triton_poi_fused_add_convolution_mean_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_add_convolution_mean_relu_threshold_backward_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mean_relu_threshold_backward_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mean_relu_threshold_backward_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = 1.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 / tmp11 tmp14 = 0.0 tmp15 = tmp10 <= tmp14 tl.store(out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr1 + (x2), tmp13, xmask) tl.store(out_ptr2 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25 = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (128, 3, 1, 
1), (3, 1, 1, 1)) assert_size_stride(primals_3, (128, ), (1, )) assert_size_stride(primals_4, (128, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128, ), (1, )) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128, ), (1, )) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128, ), (1, )) assert_size_stride(primals_22, (1, 128), (128, 1)) assert_size_stride(primals_23, (1, ), (1, )) assert_size_stride(primals_24, (10, 128), (128, 1)) assert_size_stride(primals_25, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 1, 12, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 12, 16, grid=grid(12, 16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((128, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_4, buf1, 384, 9, grid=grid(384, 9), stream=stream0) del primals_4 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_6, buf2, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_10, buf3, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_12, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_14, buf5, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_16, buf6, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_16 buf7 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_18, buf7, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_18 buf8 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_20, buf8, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_20 buf9 = empty_strided_cuda((4, 3, 2, 2), (12, 1, 6, 3), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, add_2, output], Original ATen: [aten.add, 
aten.div] triton_poi_fused_add_div_3.run(buf0, buf9, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 2, 2), (512, 1, 256, 128)) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 128, 4, 4), (2048, 1, 512, 128)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf12, primals_5, 8192, grid=grid(8192), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128)) buf14 = buf10; del buf10 # reuse buf16 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) # Topologically Sorted Source Nodes: [output_1, add_3, add_4, add_5, output_5, output_6, output_9], Original ATen: [aten.convolution, aten.add, aten.div, aten.relu] triton_poi_fused_add_convolution_div_relu_5.run(buf14, primals_3, buf13, primals_7, buf16, 2048, grid=grid(2048), stream=stream0) del buf13 del primals_3 del primals_7 # Topologically Sorted Source Nodes: [output_7], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 2, 2), (512, 1, 256, 128)) # Topologically Sorted Source Nodes: [output_10], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 2, 2), (512, 1, 256, 128)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf18, primals_11, 2048, grid=grid(2048), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [output_12], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf21 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [add_7, add_8, add_9, output_8, add_10, add_11, add_12, output_13, output_14, output_16], Original ATen: [aten.add, aten.div, aten.relu] triton_poi_fused_add_div_relu_7.run(buf15, primals_9, buf19, primals_13, buf20, buf21, 512, grid=grid(512), stream=stream0) del buf15 del buf19 del primals_13 del primals_9 # Topologically Sorted Source Nodes: [output_17], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 1, 1), (128, 1, 128, 128)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [output_17, output_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf23, primals_15, 512, grid=grid(512), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [output_19], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 1, 1), (128, 1, 128, 128)) buf25 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [output_19, output_20, output_22], Original ATen: [aten.convolution, aten.add, aten.relu] triton_poi_fused_add_convolution_relu_9.run(buf20, buf24, primals_17, buf25, 512, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [output_23], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 1, 1), (128, 1, 128, 128)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [output_23, output_24], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf27, primals_19, 512, grid=grid(512), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [output_25], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 1, 1), (128, 1, 128, 128)) buf29 = buf20; del buf20 # reuse buf30 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf31 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf35 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.bool) # Topologically Sorted Source Nodes: [output_19, output_20, output_25, output_26, output_28, mean, out_feat, output_29], Original ATen: [aten.convolution, aten.add, aten.relu, aten.mean, aten.threshold_backward] triton_poi_fused_add_convolution_mean_relu_threshold_backward_10.run(buf29, buf24, primals_17, buf28, primals_21, buf30, buf31, buf35, 512, grid=grid(512), stream=stream0) del buf24 del buf28 del buf29 del primals_17 del primals_21 buf33 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_dis], Original ATen: [aten.addmm] extern_kernels.addmm(primals_23, buf31, reinterpret_tensor(primals_22, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf33) del primals_23 buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_cls], Original ATen: [aten.addmm] extern_kernels.addmm(primals_25, buf30, reinterpret_tensor(primals_24, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf34) del primals_25 return (reinterpret_tensor(buf33, (4, ), (1, ), 0), buf34, buf30, buf0, primals_2, buf1, buf2, primals_8, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf12, buf14, buf16, buf18, buf21, buf23, buf25, buf27, buf30, buf31, primals_24, primals_22, buf35, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((128, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
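Much of the arithmetic above is plumbing around one idea: triton_poi_fused_add_div_3 (and the fused variants after it) implement the model's mean pooling as a four-corner slice average, loading the 2x2 neighbours at strided offsets and scaling by 0.25. In eager PyTorch that slice average is exactly 2x2 average pooling; a small illustrative check (not part of the generated code):

import torch
import torch.nn.functional as F

x = torch.rand(4, 3, 4, 4)
sliced = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2] +
          x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4
assert torch.allclose(sliced, F.avg_pool2d(x, kernel_size=2))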
import torch
import torch.nn as nn
import torch.nn.utils as utils
import torch.nn.functional as F
# NOTE: the original file also did `from torchvision import utils`, which
# shadowed torch.nn.utils and would break utils.spectral_norm below; that
# unused torchvision import is dropped here.


def global_pooling(input, pooling='mean'):
    if pooling == 'mean':
        return input.mean(3).mean(2)
    elif pooling == 'sum':
        return input.sum(3).sum(2)
    else:
        raise NotImplementedError()


class CustomConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            padding=None, bias=True, spectral_norm=False, residual_init=True):
        super(CustomConv2d, self).__init__()
        self.residual_init = residual_init
        if padding is None:
            padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, bias=bias)
        if spectral_norm:
            self.conv = utils.spectral_norm(self.conv)

    def forward(self, input):
        return self.conv(input)


class CustomLinear(nn.Module):

    def __init__(self, in_features, out_features, bias=True,
            spectral_norm=False):
        super(CustomLinear, self).__init__()
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        if spectral_norm:
            self.linear = utils.spectral_norm(self.linear)

    def forward(self, input):
        return self.linear(input)


class ConvMeanPool(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True,
            spectral_norm=False, residual_init=True):
        super(ConvMeanPool, self).__init__()
        self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
            bias=bias, spectral_norm=spectral_norm,
            residual_init=residual_init)

    def forward(self, input):
        output = input
        output = self.conv(output)
        # 2x2 mean pooling written as a four-corner slice average.
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
            output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
        return output


class MeanPoolConv(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, bias=True,
            spectral_norm=False, residual_init=True):
        super(MeanPoolConv, self).__init__()
        self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
            bias=bias, spectral_norm=spectral_norm,
            residual_init=residual_init)

    def forward(self, input):
        output = input
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
            output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
        output = self.conv(output)
        return output


class ResidualBlock(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size,
            resample=None, spectral_norm=False):
        super(ResidualBlock, self).__init__()
        if in_channels != out_channels or resample is not None:
            self.learnable_shortcut = True
        else:
            self.learnable_shortcut = False
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        if resample == 'down':
            self.conv_shortcut = ConvMeanPool(in_channels, out_channels,
                kernel_size=1, spectral_norm=spectral_norm,
                residual_init=False)
            self.conv1 = CustomConv2d(in_channels, in_channels,
                kernel_size=kernel_size, spectral_norm=spectral_norm)
            self.conv2 = ConvMeanPool(in_channels, out_channels,
                kernel_size=kernel_size, spectral_norm=spectral_norm)
        elif resample is None:
            if self.learnable_shortcut:
                self.conv_shortcut = CustomConv2d(in_channels, out_channels,
                    kernel_size=1, spectral_norm=spectral_norm,
                    residual_init=False)
            self.conv1 = CustomConv2d(in_channels, out_channels,
                kernel_size=kernel_size, spectral_norm=spectral_norm)
            self.conv2 = CustomConv2d(out_channels, out_channels,
                kernel_size=kernel_size, spectral_norm=spectral_norm)
        else:
            raise NotImplementedError()

    def forward(self, input):
        if self.learnable_shortcut:
            shortcut = self.conv_shortcut(input)
        else:
            shortcut = input
        output = input
        output = self.relu1(output)
        output = self.conv1(output)
        output = self.relu2(output)
        output = self.conv2(output)
        return shortcut + output


class OptimizedResidualBlock(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size,
            spectral_norm=False):
        super(OptimizedResidualBlock, self).__init__()
        self.conv1 = CustomConv2d(in_channels, out_channels,
            kernel_size=kernel_size, spectral_norm=spectral_norm)
        self.conv2 = ConvMeanPool(out_channels, out_channels,
            kernel_size=kernel_size, spectral_norm=spectral_norm)
        self.conv_shortcut = MeanPoolConv(in_channels, out_channels,
            kernel_size=1, spectral_norm=spectral_norm, residual_init=False)
        self.relu2 = nn.ReLU()

    def forward(self, input):
        shortcut = self.conv_shortcut(input)
        output = input
        output = self.conv1(output)
        output = self.relu2(output)
        output = self.conv2(output)
        return shortcut + output


class ACGANDiscriminator(nn.Module):

    def __init__(self, num_classes=10, channels=128, dropout=False,
            spectral_norm=False, pooling='mean'):
        super(ACGANDiscriminator, self).__init__()
        self.num_classes = num_classes
        self.channels = channels
        self.dropout = dropout
        self.spectral_norm = spectral_norm
        self.pooling = pooling
        self.block1 = OptimizedResidualBlock(3, channels, 3,
            spectral_norm=spectral_norm)
        self.block2 = ResidualBlock(channels, channels, 3, resample='down',
            spectral_norm=spectral_norm)
        self.block3 = ResidualBlock(channels, channels, 3, resample=None,
            spectral_norm=spectral_norm)
        self.block4 = ResidualBlock(channels, channels, 3, resample=None,
            spectral_norm=spectral_norm)
        self.relu5 = nn.ReLU()
        self.linear5dis = CustomLinear(channels, 1,
            spectral_norm=spectral_norm)
        self.linear5cls = CustomLinear(channels, num_classes)

    def forward(self, input, dropout=None):
        if dropout is None:
            dropout = self.dropout
        output = input
        output = self.block1(output)
        output = self.block2(output)
        output = F.dropout(output, p=0.2, training=dropout)
        output = self.block3(output)
        output = F.dropout(output, p=0.5, training=dropout)
        output = self.block4(output)
        output = F.dropout(output, p=0.5, training=dropout)
        output = self.relu5(output)
        out_feat = global_pooling(output, 'mean')
        output = global_pooling(output, self.pooling)
        out_dis = self.linear5dis(output)
        out_cls = self.linear5cls(out_feat)
        return out_dis.squeeze(), out_cls.squeeze(), out_feat


def get_inputs():
    return [torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {}]
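For the toy input produced by get_inputs(), the forward pass returns a squeezed realness score, class logits, and the pooled feature vector. A short eager smoke test (shapes follow from the defaults channels=128 and num_classes=10; illustration only):

import torch

model = ACGANDiscriminator(num_classes=10, channels=128)
out_dis, out_cls, out_feat = model(torch.rand(4, 3, 4, 4))
assert out_dis.shape == (4,)       # squeezed discriminator output
assert out_cls.shape == (4, 10)    # auxiliary class logits
assert out_feat.shape == (4, 128)  # globally mean-pooled features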
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.utils as utils from torchvision import utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 2 x2 = xindex // 6 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 24 * x2), xmask) tmp1 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 24 * x2), xmask) tmp3 = tl.load(in_ptr0 + (3 + x0 + 6 * x1 + 24 * x2), xmask) tmp5 = tl.load(in_ptr0 + (15 + x0 + 6 * x1 + 24 * x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_div_relu_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x1 = xindex // 128 % 2 x2 = xindex // 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 256 * x1 + 1024 * x2), None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (512 + x0 + 256 * x1 + 1024 * x2), None) tmp9 = tl.load(in_ptr1 + (128 + x0 + 256 * x1 + 1024 * x2), None) tmp12 = tl.load(in_ptr1 + (640 + x0 + 256 * x1 + 1024 * x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tl.store(in_out_ptr0 + x3, tmp17, None) tl.store(out_ptr0 + x3, tmp19, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_div_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * 
x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (256 + x0 + 512 * x1), xmask) tmp6 = tl.load(in_ptr0 + (128 + x0 + 512 * x1), xmask) tmp9 = tl.load(in_ptr0 + (384 + x0 + 512 * x1), xmask) tmp14 = tl.load(in_ptr2 + (x0 + 512 * x1), xmask) tmp15 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (256 + x0 + 512 * x1), xmask) tmp20 = tl.load(in_ptr2 + (128 + x0 + 512 * x1), xmask) tmp23 = tl.load(in_ptr2 + (384 + x0 + 512 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tmp16 = tmp14 + tmp15 tmp18 = tmp17 + tmp15 tmp19 = tmp16 + tmp18 tmp21 = tmp20 + tmp15 tmp22 = tmp19 + tmp21 tmp24 = tmp23 + tmp15 tmp25 = tmp22 + tmp24 tmp26 = tmp25 * tmp12 tmp27 = tmp13 + tmp26 tmp28 = tl.full([1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr0 + x2, tmp27, xmask) tl.store(out_ptr1 + x2, tmp29, xmask) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_convolution_mean_relu_threshold_backward_10( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = 1.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 / tmp11 tmp14 = 0.0 tmp15 = tmp10 <= tmp14 tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) tl.store(out_ptr2 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (128, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_3, (128,), (1,)) 
assert_size_stride(primals_4, (128, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (1, 128), (128, 1)) assert_size_stride(primals_23, (1,), (1,)) assert_size_stride(primals_24, (10, 128), (128, 1)) assert_size_stride(primals_25, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 1, 12, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 16)](primals_1, buf0, 12, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((128, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(384, 9)](primals_4, buf1, 384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_6, buf2, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_10, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf7 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_18, buf7, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf8 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_20, buf8, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf9 = empty_strided_cuda((4, 3, 2, 2), (12, 1, 6, 3), torch.float32) triton_poi_fused_add_div_3[grid(48)](buf0, buf9, 48, XBLOCK=64, num_warps=1, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 2, 2), (512, 1, 256, 128)) buf11 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 128, 4, 4), (2048, 1, 512, 128)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_4[grid(8192)](buf12, primals_5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf12, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 4, 4), (2048, 1, 512, 128)) buf14 = buf10 del buf10 buf16 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_add_convolution_div_relu_5[grid(2048)](buf14, primals_3, buf13, primals_7, buf16, 2048, XBLOCK=256, num_warps =4, num_stages=1) del buf13 del primals_3 del primals_7 buf15 = extern_kernels.convolution(buf14, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 2, 2), (512, 1, 256, 128)) buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 2, 2), (512, 1, 256, 128)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_6[grid(2048)](buf18, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf21 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_add_div_relu_7[grid(512)](buf15, primals_9, buf19, primals_13, buf20, buf21, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf15 del buf19 del primals_13 del primals_9 buf22 = extern_kernels.convolution(buf21, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 1, 1), (128, 1, 128, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_8[grid(512)](buf23, primals_15, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 1, 1), (128, 1, 128, 128)) buf25 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_add_convolution_relu_9[grid(512)](buf20, buf24, primals_17, buf25, 512, XBLOCK=256, num_warps=4, num_stages=1) buf26 = extern_kernels.convolution(buf25, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 1, 1), (128, 1, 128, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_8[grid(512)](buf27, primals_19, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 1, 1), (128, 1, 128, 128)) buf29 = buf20 del buf20 buf30 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf31 = empty_strided_cuda((4, 128), (128, 1), torch.float32) buf35 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), 
torch.bool) triton_poi_fused_add_convolution_mean_relu_threshold_backward_10[grid (512)](buf29, buf24, primals_17, buf28, primals_21, buf30, buf31, buf35, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf24 del buf28 del buf29 del primals_17 del primals_21 buf33 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_23, buf31, reinterpret_tensor( primals_22, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf33) del primals_23 buf34 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_25, buf30, reinterpret_tensor( primals_24, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf34) del primals_25 return (reinterpret_tensor(buf33, (4,), (1,), 0), buf34, buf30, buf0, primals_2, buf1, buf2, primals_8, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf12, buf14, buf16, buf18, buf21, buf23, buf25, buf27, buf30, buf31, primals_24, primals_22, buf35) def global_pooling(input, pooling='mean'): if pooling == 'mean': return input.mean(3).mean(2) elif pooling == 'sum': return input.sum(3).sum(2) else: raise NotImplementedError() class CustomConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True, spectral_norm=False, residual_init=True): super(CustomConv2d, self).__init__() self.residual_init = residual_init if padding is None: padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) if spectral_norm: self.conv = utils.spectral_norm(self.conv) def forward(self, input): return self.conv(input) class CustomLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, spectral_norm= False): super(CustomLinear, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) if spectral_norm: self.linear = utils.spectral_norm(self.linear) def forward(self, input): return self.linear(input) class ConvMeanPool(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(ConvMeanPool, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = self.conv(output) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(MeanPoolConv, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, resample= None, spectral_norm=False): super(ResidualBlock, self).__init__() if in_channels != out_channels or resample is not None: self.learnable_shortcut = True else: self.learnable_shortcut = False self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() if resample == 'down': self.conv_shortcut = ConvMeanPool(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False ) self.conv1 = CustomConv2d(in_channels, in_channels, kernel_size =kernel_size, spectral_norm=spectral_norm) 
self.conv2 = ConvMeanPool(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) elif resample is None: if self.learnable_shortcut: self.conv_shortcut = CustomConv2d(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) self.conv2 = CustomConv2d(out_channels, out_channels, kernel_size=kernel_size, spectral_norm=spectral_norm) else: raise NotImplementedError() def forward(self, input): if self.learnable_shortcut: shortcut = self.conv_shortcut(input) else: shortcut = input output = input output = self.relu1(output) output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output class OptimizedResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, spectral_norm=False): super(OptimizedResidualBlock, self).__init__() self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv_shortcut = MeanPoolConv(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.relu2 = nn.ReLU() def forward(self, input): shortcut = self.conv_shortcut(input) output = input output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output class ACGANDiscriminatorNew(nn.Module): def __init__(self, num_classes=10, channels=128, dropout=False, spectral_norm=False, pooling='mean'): super(ACGANDiscriminatorNew, self).__init__() self.num_classes = num_classes self.channels = channels self.dropout = dropout self.spectral_norm = spectral_norm self.pooling = pooling self.block1 = OptimizedResidualBlock(3, channels, 3, spectral_norm= spectral_norm) self.block2 = ResidualBlock(channels, channels, 3, resample='down', spectral_norm=spectral_norm) self.block3 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.block4 = ResidualBlock(channels, channels, 3, resample=None, spectral_norm=spectral_norm) self.relu5 = nn.ReLU() self.linear5dis = CustomLinear(channels, 1, spectral_norm=spectral_norm ) self.linear5cls = CustomLinear(channels, num_classes) def forward(self, input_0): primals_4 = self.block1.conv1.conv.weight primals_3 = self.block1.conv1.conv.bias primals_6 = self.block1.conv2.conv.conv.weight primals_5 = self.block1.conv2.conv.conv.bias primals_2 = self.block1.conv_shortcut.conv.conv.weight primals_7 = self.block1.conv_shortcut.conv.conv.bias primals_8 = self.block2.conv_shortcut.conv.conv.weight primals_9 = self.block2.conv_shortcut.conv.conv.bias primals_10 = self.block2.conv1.conv.weight primals_11 = self.block2.conv1.conv.bias primals_12 = self.block2.conv2.conv.conv.weight primals_13 = self.block2.conv2.conv.conv.bias primals_14 = self.block3.conv1.conv.weight primals_15 = self.block3.conv1.conv.bias primals_16 = self.block3.conv2.conv.weight primals_17 = self.block3.conv2.conv.bias primals_18 = self.block4.conv1.conv.weight primals_19 = self.block4.conv1.conv.bias primals_20 = self.block4.conv2.conv.weight primals_21 = self.block4.conv2.conv.bias primals_22 = self.linear5dis.linear.weight primals_23 = self.linear5dis.linear.bias primals_24 = self.linear5cls.linear.weight primals_25 = self.linear5cls.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0], output[1], output[2]
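A sketch of how the compiled wrapper can be checked against the eager module. This assumes both class definitions above are importable and a CUDA device is available; the generated call() only accepts the 4x3x4x4 input shape from get_inputs().

import torch

torch.manual_seed(0)
x = torch.rand([4, 3, 4, 4], device='cuda')
eager = ACGANDiscriminator().cuda().eval()
fused = ACGANDiscriminatorNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical parameter layout
with torch.no_grad():
    dis_a, cls_a, feat_a = eager(x)
    dis_b, cls_b, feat_b = fused(x)
# dropout defaults to False, so the fused graph (which omits dropout)
# should match the eager forward to float32 tolerance.
print(torch.allclose(dis_a, dis_b, atol=1e-5),
      torch.allclose(cls_a, cls_b, atol=1e-5),
      torch.allclose(feat_a, feat_b, atol=1e-5))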
takuhirok/rGAN
ACGANDiscriminator
false
16,578
[ "MIT" ]
103
6f7a092de5814c662fd17224b3d48bebe7e03c2f
https://github.com/takuhirok/rGAN/tree/6f7a092de5814c662fd17224b3d48bebe7e03c2f
EqualizedLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from typing import List
import torch.nn.functional
import torch.autograd


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have
    an effect since optimizers such as Adam normalize them by a running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$.
    But the effective weights $w$ get updated proportionately
    to $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias:
        'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
from torch import nn
import torch.utils.data
from typing import List
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del buf0
        del primals_2
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have
    an effect since optimizers such as Adam normalize them by a running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$.
    But the effective weights $w$ get updated proportionately
    to $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinearNew(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
""" def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, input_0): primals_2 = self.bias primals_1 = self.weight.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
techthiyanes/annotated_deep_learning_paper_implementations
EqualizedLinear
false
16,579
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
SymmetricBCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jh/cjhtwuvzekeny3gg2m46kcjwgm2k5zv7spmwat2qizuv7clubbmm.py # Topologically Sorted Source Nodes: [loss_ce, y_pred_2, log, mul, loss_rce, y_true_2, log_1, mul_1, add], Original ATen: [aten.xlogy, aten.clamp, aten.log, aten.mul, aten.sub, aten.mean, aten.add] # Source node to ATen node mapping: # add => add # log => log # log_1 => log_2 # loss_ce => eq, full_default, full_default_1, isnan, log_1, mean, mul, mul_1, sub, where, where_1 # loss_rce => eq_1, full_default_2, full_default_3, isnan_1, log_3, mean_1, mul_2, mul_3, sub_1, where_2, where_3 # mul => mul_4 # mul_1 => mul_5 # y_pred_2 => clamp_max, clamp_min # y_true_2 => clamp_max_1, clamp_min_1 # Graph fragment: # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%view,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 1e-07), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %log), kwargs = {}) # %sub : 
[num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.1), kwargs = {}) # %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%view_1,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_1, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %log_3), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view, 0.0001), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %log_2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0 = async_compile.triton('triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr1 + (r0), None) tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float("nan") tmp8 = tl.where(tmp1, tmp7, tmp6) tmp10 = tl.sigmoid(tmp9) tmp11 = 1e-07 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = tl_math.log(tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp8 - tmp16 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = libdevice.isnan(tmp10).to(tl.int1) tmp22 = tmp10 == tmp2 tmp23 = tl_math.log(tmp10) tmp24 = tmp10 * tmp23 tmp25 = tl.where(tmp22, tmp2, tmp24) tmp26 = tl.where(tmp21, tmp7, tmp25) tmp27 = 0.0001 tmp28 = triton_helpers.maximum(tmp0, tmp27) tmp29 = triton_helpers.minimum(tmp28, tmp13) tmp30 = tl_math.log(tmp29) tmp31 = tmp10 * tmp30 tmp32 = tmp26 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 256.0 tmp37 = tmp20 / tmp36 tmp38 = 0.1 tmp39 = tmp37 * tmp38 tmp40 = tmp35 / tmp36 tmp41 = tmp40 * tmp38 tmp42 = tmp39 + tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp42, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss_ce, y_pred_2, log, mul, loss_rce, y_true_2, log_1, mul_1, add], Original ATen: [aten.xlogy, aten.clamp, aten.log, aten.mul, aten.sub, aten.mean, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.hub class SymmetricBCELoss(nn.Module): def __init__(self, alpha=0.1, beta=0.1): super().__init__() self.alpha = alpha self.beta = beta def forward(self, input, target): y_true = target y_pred = torch.sigmoid(input) y_true = y_true.view(-1, 1) y_pred = y_pred.view(-1, 1) y_true_1 = y_true y_pred_1 = y_pred y_true_2 = y_true y_pred_2 = y_pred y_pred_1 = torch.clamp(y_pred_1, 1e-07, 1.0) y_true_2 = torch.clamp(y_true_2, 0.0001, 1.0) loss_ce = F.kl_div(torch.log(y_pred_1), y_true_1) loss_rce = F.kl_div(torch.log(y_true_2), y_pred_2) return self.alpha * loss_ce + self.beta * loss_rce def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp1 = libdevice.isnan(tmp0).to(tl.int1) tmp2 = 0.0 tmp3 = tmp0 == tmp2 tmp4 = tl_math.log(tmp0) tmp5 = tmp0 * tmp4 tmp6 = tl.where(tmp3, tmp2, tmp5) tmp7 = float('nan') tmp8 = tl.where(tmp1, tmp7, tmp6) tmp10 = tl.sigmoid(tmp9) tmp11 = 1e-07 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = tl_math.log(tmp14) tmp16 = tmp0 * tmp15 tmp17 = tmp8 - tmp16 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = libdevice.isnan(tmp10).to(tl.int1) tmp22 = tmp10 == tmp2 tmp23 = tl_math.log(tmp10) tmp24 = tmp10 * tmp23 tmp25 = tl.where(tmp22, tmp2, tmp24) tmp26 = tl.where(tmp21, tmp7, tmp25) tmp27 = 0.0001 tmp28 = triton_helpers.maximum(tmp0, tmp27) tmp29 = triton_helpers.minimum(tmp28, tmp13) tmp30 = tl_math.log(tmp29) tmp31 = tmp10 * tmp30 tmp32 = tmp26 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 256.0 tmp37 = tmp20 / tmp36 tmp38 = 0.1 tmp39 = tmp37 * tmp38 tmp40 = tmp35 / tmp36 tmp41 = tmp40 * tmp38 tmp42 = tmp39 + tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp42, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_log_mean_mul_sub_xlogy_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class SymmetricBCELossNew(nn.Module): def __init__(self, alpha=0.1, beta=0.1): super().__init__() self.alpha = alpha self.beta = beta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
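A smoke test comparing the eager loss with the fused Triton version, assuming a CUDA device; the kernel hard-codes 256 elements, so it only accepts the 4x4x4x4 shapes from get_inputs().

import torch

logits = torch.rand(4, 4, 4, 4, device='cuda')
targets = torch.rand(4, 4, 4, 4, device='cuda')
# Both compute a symmetric pair of KL terms between the targets and
# sigmoid(logits), each clamped before the log, scaled by alpha and beta.
eager = SymmetricBCELoss()(logits, targets)
fused = SymmetricBCELossNew()(logits, targets)
print(eager.item(), fused.item())  # should agree to float32 tolerance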
thangnx183/kaggle-understanding-clouds
SymmetricBCELoss
false
16,580
[ "BSD-2-Clause" ]
207
15ad2a9029958262437b899cb00525579da23911
https://github.com/thangnx183/kaggle-understanding-clouds/tree/15ad2a9029958262437b899cb00525579da23911
UpSample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ff/cffhfyg22xbribwqqqct5kdcsfampxjsxm645cabtsik2vspgfca.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 + tmp2 tmp16 = tmp15 * tmp2 tmp17 = tmp16 - tmp2 tmp18 = triton_helpers.maximum(tmp17, tmp6) tmp19 = tmp18.to(tl.int32) tmp20 = tmp19 + tmp9 tmp21 = triton_helpers.minimum(tmp20, tmp11) tmp22 = tl.load(in_ptr0 + (tmp21 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (tmp19 + (4*tmp12) + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tmp22 - tmp23 tmp25 = tmp19.to(tl.float32) tmp26 = tmp18 - tmp25 tmp27 = triton_helpers.maximum(tmp26, tmp6) tmp28 = 1.0 tmp29 = triton_helpers.minimum(tmp27, tmp28) tmp30 = tmp24 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp19 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp21 + (4*tmp8) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp8.to(tl.float32) tmp39 = tmp7 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = triton_helpers.minimum(tmp40, tmp28) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + (x4), tmp43, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/n3/cn3uxzu2hel4oq46hrfe2umbpvgsbsbptwqhsvbq3yzjtmhnf4c6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] # Source node to ATen node mapping: # x_1 => _unsafe_index_4, _unsafe_index_5 # Graph fragment: # %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_2, [None, None, %clamp_max_4, None]), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %clamp_max_5]), kwargs = {}) triton_poi_fused_replication_pad2d_1 = async_compile.triton('triton_poi_fused_replication_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 
'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = (xindex // 10) % 10 x2 = (xindex // 100) x3 = xindex tmp0 = tl.load(in_ptr0 + ((8*((7) * ((7) <= (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0))))) + (((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) * ((((0) * ((0) >= ((-1) + x1)) + ((-1) + x1) * (((-1) + x1) > (0)))) < (7)))) + (64*x2) + ((7) * ((7) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (7)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, arg0_1, 1024, grid=grid(1024), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((16, 1, 10, 10), (100, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d] triton_poi_fused_replication_pad2d_1.run(buf2, buf3, 1600, grid=grid(1600), stream=stream0) del buf2 # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.replication_pad2d, aten.convolution] buf4 = extern_kernels.convolution(buf3, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (16, 1, 8, 8), (64, 64, 8, 1)) del arg1_1 del buf3 return (reinterpret_tensor(buf4, (4, 4, 8, 8), (256, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
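For orientation, the fused kernel above bakes the half-pixel source-coordinate math of bilinear upsampling (align_corners=False, scale factor 2, input size 4) straight into its index arithmetic. A minimal illustrative sketch of that computation in plain Python (not generated code; comments reference the tmp* values in the kernel):

def source_coords(dst):
    # tmp3..tmp7: half-pixel mapping, clamped below at 0
    src = max((dst + 0.5) * 0.5 - 0.5, 0.0)
    lo = int(src)                        # tmp8: lower neighbour index
    hi = min(lo + 1, 3)                  # tmp10..tmp12: upper neighbour, clamped to the 4-wide input
    frac = min(max(src - lo, 0.0), 1.0)  # tmp26..tmp29: interpolation weight
    return lo, hi, frac

for d in range(8):  # 8 output positions per axis
    print(d, source_coords(d))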
import torch from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class UpSample(nn.Module): """ <a id="up_sample"></a> ### Up-sample The up-sample operation scales the image up by $2 \times$ and [smoothens](#smooth) each feature channel. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.smooth = Smooth() def forward(self, x: 'torch.Tensor'): return self.smooth(self.up_sample(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
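A quick eager-mode sketch of how the module above can be exercised (assuming the classes and get_inputs() defined above are in scope): bilinear upsampling doubles the 4x4 input to 8x8, and the smoothing pass (ReplicationPad2d(1) followed by the fixed 3x3 blur) preserves that size.

import torch

# Usage sketch for the eager UpSample defined above.
model = UpSample()
x, = get_inputs()  # a (4, 4, 4, 4) tensor
with torch.no_grad():
    y = model(x)
# Upsample doubles H and W; pad(1) + 3x3 conv keeps them unchanged.
assert y.shape == (4, 4, 8, 8)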
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 + tmp2 tmp16 = tmp15 * tmp2 tmp17 = tmp16 - tmp2 tmp18 = triton_helpers.maximum(tmp17, tmp6) tmp19 = tmp18.to(tl.int32) tmp20 = tmp19 + tmp9 tmp21 = triton_helpers.minimum(tmp20, tmp11) tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tmp22 - tmp23 tmp25 = tmp19.to(tl.float32) tmp26 = tmp18 - tmp25 tmp27 = triton_helpers.maximum(tmp26, tmp6) tmp28 = 1.0 tmp29 = triton_helpers.minimum(tmp27, tmp28) tmp30 = tmp24 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp8.to(tl.float32) tmp39 = tmp7 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = triton_helpers.minimum(tmp40, tmp28) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + x4, tmp43, xmask) @triton.jit def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 % 10 x2 = xindex // 100 x3 = xindex tmp0 = tl.load(in_ptr0 + (8 * (7 * (7 <= 0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0) < 7)) + 64 * x2 + ( 7 * (7 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 7))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) 
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (1024)](buf2, arg0_1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((16, 1, 10, 10), (100, 100, 10, 1), torch .float32) triton_poi_fused_replication_pad2d_1[grid(1600)](buf2, buf3, 1600, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = extern_kernels.convolution(buf3, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (16, 1, 8, 8), (64, 64, 8, 1)) del arg1_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 8, 8), (256, 64, 8, 1), 0), class Smooth(nn.Module): """ <a id="smooth"></a> ### Smoothing Layer This layer blurs each channel """ def __init__(self): super().__init__() kernel = [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel = torch.tensor([[kernel]], dtype=torch.float) kernel /= kernel.sum() self.kernel = nn.Parameter(kernel, requires_grad=False) self.pad = nn.ReplicationPad2d(1) def forward(self, x: 'torch.Tensor'): b, c, h, w = x.shape x = x.view(-1, 1, h, w) x = self.pad(x) x = F.conv2d(x, self.kernel) return x.view(b, c, h, w) class UpSampleNew(nn.Module): """ <a id="up_sample"></a> ### Up-sample The up-sample operation scales the image up by $2 \times$ and [smoothens](#smooth) each feature channel. This is based on the paper [Making Convolutional Networks Shift-Invariant Again](https://papers.labml.ai/paper/1904.11486). """ def __init__(self): super().__init__() self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.smooth = Smooth() def forward(self, input_0): arg1_1 = self.smooth.kernel arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
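A hedged parity sketch for the compiled path (illustrative only; it assumes a CUDA device, since call() pins cuda:0, and builds the eager reference from nn.Upsample plus the Smooth class defined above):

import torch
from torch import nn

if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
    smooth = Smooth().cuda()
    compiled = UpSampleNew().cuda()
    with torch.no_grad():
        # The generated kernels should reproduce upsample-then-blur numerically.
        torch.testing.assert_close(compiled(x), smooth(up(x)), rtol=1e-4, atol=1e-4)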
techthiyanes/annotated_deep_learning_paper_implementations
UpSample
false
16,581
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
BertAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float("-inf") tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = (tmp29 != 0) tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = (tmp33 != 0) tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = (tmp38 != 0) tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) tl.store(out_ptr2 + (x2), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x4), xmask) tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => var_mean # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/l3/cl3bnd5pv2p4ydfmlj74bv4mbiwr2ntrdvbubnjubetyhosmxag6.py # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => add_2, add_3, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically 
Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del buf14 del buf15 del primals_12 return (buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
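Two of the kernels above deserve a gloss: triton_poi_fused_1 and triton_poi_fused_2 together fuse a numerically stable softmax with a guard that zeroes fully masked rows. A minimal eager-mode sketch of the same pattern (an illustration, not the generated code):

import torch

def stable_masked_softmax(scores: torch.Tensor) -> torch.Tensor:
    # Max-subtraction for numerical stability (the amax/sub/exp/sum ops in the graph).
    amax = scores.amax(dim=-1, keepdim=True)
    probs = (scores - amax).exp()
    probs = probs / probs.sum(dim=-1, keepdim=True)
    # Rows whose entries are all -inf would otherwise yield NaNs; the graph's
    # eq(-inf) / logical_not / any / where sequence replaces them with zeros.
    all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(all_masked, torch.zeros_like(probs), probs)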
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): """ Self-attention layer. For the underlying idea, see this blog post: http://jalammar.github.io/illustrated-transformer/ """ def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): """ Implements self attention + Add & Norm """ def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5, layer_norm_eps=1)}]
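A minimal eager-mode sketch (assuming the classes and the get_inputs()/get_init_inputs() helpers above are in scope; eval() disables both dropout layers so the output is deterministic):

import torch

_, init_kwargs = get_init_inputs()
model = BertAttention(**init_kwargs).eval()  # hidden_size=4 over 4 heads of size 1
hidden_states, attention_mask = get_inputs()
with torch.no_grad():
    out = model(hidden_states, attention_mask)
# Self-attention followed by Add & Norm preserves (batch, seq, hidden).
assert out.shape == (4, 4, 4)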
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, 
xmask) tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_3, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_12 return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9 class BertSelfAttention(nn.Module): """ Self-attention layer. For the underlying idea, see this blog post: http://jalammar.github.io/illustrated-transformer/ """ def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): """ Implements self attention + Add & Norm """ def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
techthiyanes/nlp-notebook
BertAttention
false
16,582
[ "MIT" ]
136
0e5f4b75e635128d4056c89a6c65bea60c15e836
https://github.com/techthiyanes/nlp-notebook/tree/0e5f4b75e635128d4056c89a6c65bea60c15e836
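In the BertAttention record above, the compiled call graph replaces the matmul/softmax chain of BertSelfAttention.forward and the residual-plus-LayerNorm of BertSelfOutput with two extern_kernels.bmm calls and fused Triton kernels. As a minimal, self-contained eager-mode sketch of the computation those kernels implement (shapes chosen to match the record's 4x4x4 inputs; the helper name is illustrative and not part of the dataset row):

import math
import torch

def scaled_dot_product_attention(q, k, v, mask):
    # q, k, v: [batch, heads, seq, head_size]; the mask is broadcast-added to
    # the attention logits before the softmax, as in BertSelfAttention.forward.
    scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(q.size(-1))
    probs = torch.softmax(scores + mask, dim=-1)
    return torch.matmul(probs, v)

# Record-sized example: hidden_size=4 with num_attention_heads=4 gives
# head_size=1, which is why the bmm operands above are reinterpreted as
# (16, 4, 1) and (16, 1, 4).
q = k = v = torch.rand(4, 4, 4, 1)
mask = torch.zeros(4, 1, 4, 4)
out = scaled_dot_product_attention(q, k, v, mask)  # [4, 4, 4, 1]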
moving_avg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lt/cltmx7l3jx75f5n76p3gf3jhkvk3mvpqrgcjaor5qn3d7ssnzsgz.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %arg0_1, %repeat_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + (4*((-1) + x1)) + (16*x2)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 6, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wz/cwzylg4eb3aqxqkrml6khdtgxujb4phx76ox5xkbrov2svlklot3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x_1 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze, [1, 4], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (24*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (24*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (24*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (24*x1)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 48, 4), torch.float32) # Topologically 
Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_1.run(buf0, buf1, 48, grid=grid(48), stream=stream0) del buf0 return (reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class moving_avg(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4, 'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 24 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 24 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 48, 4), torch.float32) triton_poi_fused_avg_pool2d_1[grid(48)](buf0, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0), class moving_avgNew(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avgNew, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
thuml/Autoformer
moving_avg
false
16,583
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
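For reference, the two kernels in the moving_avg record compute exactly the replicate-pad-then-average of the eager module: with kernel_size=4 and stride=1, the first and last time steps are each repeated (4 - 1) // 2 = 1 time, so triton_poi_fused_cat_0 grows the [4, 4, 4] input to the [4, 6, 4] buf0, and triton_poi_fused_avg_pool2d_1 (whose 0.25 constant is just 1/kernel_size) produces the [4, 3, 4] result returned as a view of buf1. A short eager-mode sketch of the same arithmetic (variable names are illustrative):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)                         # [batch, time, features]
front = x[:, 0:1, :]                            # (kernel_size - 1) // 2 == 1 copy
end = x[:, -1:, :]
padded = torch.cat([front, x, end], dim=1)      # [4, 6, 4], matches buf0
trend = F.avg_pool1d(padded.permute(0, 2, 1), kernel_size=4, stride=1)
trend = trend.permute(0, 2, 1)                  # [4, 3, 4], matches the returned view of buf1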
GeneratorBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wz/cwznwckxjekzcxlkjcgq2jhezeju57iivq5nhdjaezb3fvcxcau2.py # Topologically 
Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # sigma_inv => rsqrt # sum_1 => sum_1 # weights_1 => mul_2 # weights_2 => mul_3 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %unsqueeze_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %rsqrt), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_1 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = (rindex // 9) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (36*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() 
tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (36*x4)), tmp13, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mk/cmk6aftvifk5vtxtg2lvcslfabqnrroho76zhtuahfdwmzysuwiw.py # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, x_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_2 # mul_4 => mul_4 # x_3 => add_1 # x_4 => gt, mul_5, where # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_6, %select), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %mul_4), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %unsqueeze_9), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_5), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = 
tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (x4), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cy/ccy6r6r4kfbnktjfnh5b72535yhbr4r6gtvhdjage2wywifwywz3.py # Topologically Sorted Source Nodes: [mul_9, x_8, add_5, x_9], Original ATen: [aten.mul, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # add_5 => add_5 # mul_9 => mul_10 # x_8 => add_4 # x_9 => gt_1, mul_11, where_1 # Graph fragment: # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_16, %select_1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %mul_10), kwargs = {}) # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %unsqueeze_19), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_5, %mul_11), kwargs = {}) triton_poi_fused_add_leaky_relu_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tl.store(in_out_ptr0 + (x4), tmp12, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tx/ctxhwgwusrrp4juzika66nlcnovoipxygsfsg3yjbj3mt4mcuato.py # Topologically Sorted Source Nodes: [weights_9], Original ATen: [aten.mul] # Source node to ATen node mapping: # weights_9 => mul_14 # Graph fragment: # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_23, %unsqueeze_22), kwargs = {}) triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q6/cq6266xbhcci7slv2fgafadqumjuzj6rlg3czend3eybumfoaegp.py # Topologically Sorted Source Nodes: [add_6, rgb], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_6 => add_6 # rgb => gt_2, mul_15, where_2 # Graph fragment: # %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %unsqueeze_26), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_6, %mul_15), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_2, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (1, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_17, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [s], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, 
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf0 # reuse buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_1.run(buf3, primals_6, buf1, buf4, 16, 36, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_5], Original ATen: [aten.mul] triton_poi_fused_mul_0.run(primals_9, buf6, 16, grid=grid(16), stream=stream0) del primals_9 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [s_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, primals_4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf6 # reuse buf9 = reinterpret_tensor(buf8, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf8 # reuse buf10 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_5, pow_2, sum_2, add_3, sigma_inv_1, weights_6], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_1.run(buf9, primals_11, buf7, buf10, 16, 36, grid=grid(16), stream=stream0) buf11 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, x_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2.run(buf11, primals_7, primals_1, primals_8, buf20, 256, grid=grid(256), stream=stream0) del primals_7 del primals_8 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 4, 4), (0, 16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf12, (1, 16, 4, 4), (256, 16, 4, 1)) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [mul_9, x_8, add_5, x_9], Original ATen: [aten.mul, aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_mul_3.run(buf13, primals_12, primals_1, primals_13, 256, grid=grid(256), stream=stream0) del primals_12 del primals_13 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_10], Original ATen: [aten.mul] triton_poi_fused_mul_0.run(primals_14, buf14, 16, grid=grid(16), stream=stream0) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [style], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, primals_4, reinterpret_tensor(buf14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del buf14 del primals_15 buf16 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_9], Original ATen: [aten.mul] triton_poi_fused_mul_4.run(primals_16, buf15, buf16, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf17, (1, 12, 4, 4), (192, 16, 4, 1)) buf18 = reinterpret_tensor(buf17, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf17 # reuse buf19 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add_6, rgb], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_5.run(buf18, primals_17, buf19, 192, grid=grid(192), stream=stream0) del primals_17 return (buf13, buf18, primals_4, primals_6, primals_11, primals_16, reinterpret_tensor(primals_1, (4, ), (1, ), 0), buf1, buf3, reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(primals_1, (4, ), (1, ), 4), buf7, buf9, reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(buf11, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf19, buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
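A detail worth calling out in the GeneratorBlock record: extern_kernels.convolution is invoked with groups=4 on (1, 16, 4, 4) views because Conv2dWeightModulate folds the batch into the channel dimension, so each sample is convolved with its own modulated filter bank in a single grouped convolution (buf4, shaped (4, 4, 4, 3, 3), is reinterpreted as (16, 4, 3, 3) for this). A hedged sketch of the trick with record-sized, illustrative shapes:

import torch
import torch.nn.functional as F

b, in_f, out_f, h, w, k = 4, 4, 4, 4, 4, 3
x = torch.rand(b, in_f, h, w)
weights = torch.rand(b, out_f, in_f, k, k)      # one filter bank per sample

x_flat = x.reshape(1, b * in_f, h, w)           # batch folded into channels
w_flat = weights.reshape(b * out_f, in_f, k, k)
y = F.conv2d(x_flat, w_flat, padding=k // 2, groups=b)  # group i sees sample i
y = y.reshape(b, out_f, h, w)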
import math import torch import numpy as np from torch import nn from typing import Tuple import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.nn.functional import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an effect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates them by normalizing.
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. 
""" def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlock(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*--- The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tuple of two noise tensors of shape `[batch_size, 1, height, width]` """ x = self.style_block1(x, w, noise[0]) x = self.style_block2(x, w, noise[1]) rgb = self.to_rgb(x, w) return x, rgb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_latent': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = rindex // 9 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 36 * x0), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 36 * x4), tmp13, rmask & xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, 
tmp7, tmp11) tl.store(in_out_ptr0 + x4, tmp12, xmask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (1,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_17, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf3, primals_6, buf1, buf4, 16, 36, XBLOCK=8, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_9, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_10, primals_4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf6 buf9 = reinterpret_tensor(buf8, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf8 buf10 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf9, primals_11, buf7, buf10, 16, 36, XBLOCK=8, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2[grid(256)]( buf11, primals_7, primals_1, primals_8, buf20, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 del primals_8 buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 4, 4), (0, 16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation= (1, 1), transposed=False, output_padding=(0, 0), groups=4, bias =None) assert_size_stride(buf12, (1, 16, 4, 4), (256, 16, 4, 1)) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf12 triton_poi_fused_add_leaky_relu_mul_3[grid(256)](buf13, primals_12, primals_1, primals_13, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 del primals_13 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_14, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, primals_4, reinterpret_tensor( buf14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del buf14 del primals_15 buf16 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch .float32) triton_poi_fused_mul_4[grid(48)](primals_16, buf15, buf16, 48, XBLOCK=64, num_warps=1, num_stages=1) buf17 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=( 1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None ) assert_size_stride(buf17, (1, 12, 4, 4), (192, 16, 4, 1)) buf18 = reinterpret_tensor(buf17, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf17 buf19 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_5[grid(192)](buf18, primals_17, buf19, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 return (buf13, buf18, primals_4, primals_6, primals_11, primals_16, reinterpret_tensor(primals_1, (4,), (1,), 0), buf1, buf3, reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(primals_1, (4,), (1,), 4), buf7, buf9, reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(buf11, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf19, buf20) class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. 
$$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an effect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. """ def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'bool'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is a flag for whether to normalize weights by their standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is the style-based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).*--- Style block has a weight modulation convolution layer.
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) ---*$A$ denotes a linear layer.*--- Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(3)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlockNew(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) ---*$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*--- The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. 
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, input_0, input_1, input_2): primals_7 = self.style_block1.scale_noise primals_3 = self.style_block1.bias primals_8 = self.style_block1.to_style.bias primals_1 = self.style_block1.to_style.weight.weight primals_6 = self.style_block1.conv.weight.weight primals_12 = self.style_block2.scale_noise primals_10 = self.style_block2.bias primals_13 = self.style_block2.to_style.bias primals_2 = self.style_block2.to_style.weight.weight primals_11 = self.style_block2.conv.weight.weight primals_17 = self.to_rgb.bias primals_15 = self.to_rgb.to_style.bias primals_4 = self.to_rgb.to_style.weight.weight primals_16 = self.to_rgb.conv.weight.weight primals_5 = input_0 primals_9 = input_1 primals_14 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
techthiyanes/annotated_deep_learning_paper_implementations
GeneratorBlock
false
16,584
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
BiasAdd
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lp/clpdykiyk36362w7fen67akood534pir74nvcoy7bcv25e3uagio.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BiasAdd(nn.Module): def __init__(self, channels, opts, act='linear', alpha=None, gain=None, lrmul=1): """ BiasAdd """ super(BiasAdd, self).__init__() self.opts = opts self.bias = torch.nn.Parameter(torch.zeros(channels, 1, 1) * lrmul) self.act = act self.alpha = alpha if alpha is not None else 0.2 self.gain = gain if gain is not None else 1.0 def forward(self, x): x += self.bias if self.act == 'linear': pass elif self.act == 'lrelu': x = F.leaky_relu(x, self.alpha, inplace=True) x = x * np.sqrt(2) if self.gain != 1: x = x * self.gain return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'opts': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf0 class BiasAddNew(nn.Module): def __init__(self, channels, opts, act='linear', alpha=None, gain=None, lrmul=1): """ BiasAdd """ super(BiasAddNew, self).__init__() self.opts = opts self.bias = torch.nn.Parameter(torch.zeros(channels, 1, 1) * lrmul) self.act = act self.alpha = alpha if alpha is not None else 0.2 self.gain = gain if gain is not None else 1.0 def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
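As a hedged equivalence sketch (assumptions: a CUDA device, since `call` pins device 0, and the `_mock_config` helper imported by the reference code above), `BiasAddNew` should reproduce the eager `BiasAdd` on the default `'linear'` path once the two modules share the same bias parameter:

import torch
from _paritybench_helpers import _mock_config  # same helper used by get_init_inputs

if torch.cuda.is_available():
    eager = BiasAdd(channels=4, opts=_mock_config()).cuda()
    fused = BiasAddNew(channels=4, opts=_mock_config()).cuda()
    fused.bias = eager.bias  # share the parameter so outputs are comparable
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # BiasAdd.forward adds the bias in place (`x += self.bias`), so clone the
    # input before each call to keep the comparison fair.
    print(torch.allclose(eager(x.clone()), fused(x.clone())))  # expected: True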
tomguluson92/StyleGAN2_PyTorch
BiasAdd
false
16,585
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
GLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mt/cmtjpvemkkbiebqpm4ufe2nfqpi4qmoeltheqp7bk22vzvgkl5lt.py # Topologically Sorted Source Nodes: [sigmoid, x], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # x => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 
+ (x0), None) tmp1 = tl.load(in_ptr1 + (x0), None) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x0), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 4), (4, 1)) assert_size_stride(primals_5, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf0, buf1, buf2, 32768, grid=grid(32768), stream=stream0) return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def initialize_weight(x): nn.init.xavier_uniform_(x.weight) if x.bias is not None: nn.init.constant_(x.bias, 0) class GLU(nn.Module): def __init__(self, in_features, dropout_rate): super(GLU, self).__init__() self.sigm = nn.Sigmoid() self.W = nn.Linear(in_features, out_features=512, bias=True) self.V = nn.Linear(in_features, out_features=512, bias=True) initialize_weight(self.W) initialize_weight(self.V) self.dropout = nn.Dropout(dropout_rate) def forward(self, x): x = self.W(x) * self.sigm(self.V(x)) x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + x0, None) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 4), (4, 1)) assert_size_stride(primals_5, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(32768)](buf0, buf1, buf2, 32768, XBLOCK=256, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 def initialize_weight(x): nn.init.xavier_uniform_(x.weight) if x.bias is not None: nn.init.constant_(x.bias, 0) class GLUNew(nn.Module): def __init__(self, in_features, dropout_rate): super(GLUNew, self).__init__() self.sigm = nn.Sigmoid() self.W = nn.Linear(in_features, out_features=512, bias=True) self.V = nn.Linear(in_features, out_features=512, bias=True) initialize_weight(self.W) initialize_weight(self.V) self.dropout = nn.Dropout(dropout_rate) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_4 = self.V.weight primals_5 = self.V.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
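A small usage sketch for the eager `GLU` above (CPU-only, no extra assumptions beyond the class definition): in `eval()` mode the dropout is disabled, and because the sigmoid gate lies in (0, 1), the gated output is elementwise no larger in magnitude than `W(x)`:

import torch

torch.manual_seed(0)
glu = GLU(in_features=4, dropout_rate=0.5).eval()  # eval() disables dropout
x = torch.rand(4, 4, 4, 4)
y = glu(x)
print(y.shape)  # torch.Size([4, 4, 4, 512]) -- last dim mapped 4 -> 512
# y = W(x) * sigmoid(V(x)), and sigmoid(.) < 1, so |y| <= |W(x)| elementwise:
print(bool((y.abs() <= glu.W(x).abs() + 1e-6).all()))  # True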
tijsmaas/transformer-pytorch
GLU
false
16,586
[ "MIT" ]
237
bb517979d62c416f68d66325f51826bbbf4ba1bd
https://github.com/tijsmaas/transformer-pytorch/tree/bb517979d62c416f68d66325f51826bbbf4ba1bd
SquaredErrorBayesRisk
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4m/c4mbktqccghizbaoiq4sp5jwan6vfxof7affhyhr32khwihn5hy7.py # Topologically Sorted Source Nodes: [alpha, p], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # alpha => add # p => div # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %unsqueeze), kwargs = {}) triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + 
((4*x0) + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 + tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 + tmp1 tmp13 = tmp10 + tmp12 tmp14 = tmp2 / tmp13 tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kb/ckb3ux77dgnn2btgpdwtpvd62ighfwj3upxaxlwlv65qub5yeef2.py # Topologically Sorted Source Nodes: [sub, err, sub_1, mul, add_1, var, add_2, loss, mean], Original ATen: [aten.sub, aten.pow, aten.rsub, aten.mul, aten.add, aten.div, aten.sum, aten.mean] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # err => pow_1 # loss => sum_2 # mean => mean # mul => mul # sub => sub # sub_1 => sub_1 # var => div_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %div), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %div_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_2, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (4*r3), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r3), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + ((16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (2 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (3 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + (4*r3)), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (1 + (4*r3)), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (5 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (6 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (7 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (2 + (4*r3)), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (2 + (4*r3)), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (8 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (9 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr2 + (10 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (11 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr0 + (3 + (4*r3)), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr1 + (3 + (4*r3)), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr2 + (12 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr2 + (13 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (14 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp77 = tl.load(in_ptr2 + (15 + (16*r0) + (64*r2)), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp6 = tmp1 * tmp5 tmp8 = tmp7 + tmp4 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp16 = tmp15 + tmp4 tmp17 = tmp14 + tmp16 tmp18 = tmp17 + tmp4 tmp19 = tmp6 / tmp18 tmp20 = tmp3 + tmp19 tmp23 = tmp21 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp4 - tmp22 tmp26 = tmp22 * tmp25 tmp28 = tmp27 + tmp4 tmp30 = tmp29 + tmp4 tmp31 = tmp28 + tmp30 tmp33 = tmp32 + tmp4 tmp34 = tmp31 + tmp33 tmp36 = tmp35 + tmp4 tmp37 = tmp34 + tmp36 tmp38 = tmp37 + tmp4 tmp39 = tmp26 / tmp38 tmp40 = tmp24 + tmp39 tmp41 = tmp20 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tmp44 * tmp44 tmp46 = tmp4 - tmp43 tmp47 = tmp43 * tmp46 tmp49 = tmp48 + tmp4 tmp51 = tmp50 + tmp4 tmp52 = tmp49 + tmp51 tmp54 = tmp53 + tmp4 tmp55 = tmp52 + tmp54 tmp57 = tmp56 + tmp4 tmp58 = tmp55 + tmp57 tmp59 = tmp58 + tmp4 tmp60 = tmp47 / tmp59 tmp61 = tmp45 + tmp60 tmp62 = tmp41 + tmp61 tmp65 = 
tmp63 - tmp64 tmp66 = tmp65 * tmp65 tmp67 = tmp4 - tmp64 tmp68 = tmp64 * tmp67 tmp70 = tmp69 + tmp4 tmp72 = tmp71 + tmp4 tmp73 = tmp70 + tmp72 tmp75 = tmp74 + tmp4 tmp76 = tmp73 + tmp75 tmp78 = tmp77 + tmp4 tmp79 = tmp76 + tmp78 tmp80 = tmp79 + tmp4 tmp81 = tmp68 / tmp80 tmp82 = tmp66 + tmp81 tmp83 = tmp62 + tmp82 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = tl.sum(tmp84, 1)[:, None] tmp87 = 64.0 tmp88 = tmp86 / tmp87 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp88, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha, p], Original ATen: [aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [sub, err, sub_1, mul, add_1, var, add_2, loss, mean], Original ATen: [aten.sub, aten.pow, aten.rsub, aten.mul, aten.add, aten.div, aten.sum, aten.mean] triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1.run(buf3, arg1_1, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class SquaredErrorBayesRisk(Module): """ <a id="SquaredErrorBayesRisk"></a> ## Bayes Risk with Squared Error Loss Here the cost function is squared error, $$\\sum_{k=1}^K (y_k - p_k)^2 = \\Vert \\mathbf{y} - \\mathbf{p} \\Vert_2^2$$ We integrate this cost over all $\\mathbf{p}$ \\begin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K (y_k - p_k)^2 \\Big] \\frac{1}{B(\\textcolor{orange}{\\mathbf{\\alpha}})} \\prod_{k=1}^K p_k^{\\textcolor{orange}{\\alpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K \\mathbb{E} \\Big[ y_k^2 -2 y_k p_k + p_k^2 \\Big] \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\end{align} Where $$\\mathbb{E}[p_k] = \\hat{p}_k = \\frac{\\textcolor{orange}{\\alpha_k}}{S}$$ is the expected probability when sampled from the Dirichlet distribution and $$\\mathbb{E}[p_k^2] = \\mathbb{E}[p_k]^2 + \\text{Var}(p_k)$$ where $$\\text{Var}(p_k) = \\frac{\\textcolor{orange}{\\alpha_k}(S - \\textcolor{orange}{\\alpha_k})}{S^2 (S + 1)} = \\frac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1}$$ is the variance. This gives, \\begin{align} \\mathcal{L}(\\Theta) &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k]^2 + \\text{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( \\big( y_k -\\mathbb{E}[p_k] \\big)^2 + \\text{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ( y_k -\\hat{p}_k)^2 + \\frac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1} \\Big) \\end{align} This first part of the equation $\\big(y_k -\\mathbb{E}[p_k]\\big)^2$ is the error term and the second part is the variance. """ def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'): """ * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]` * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]` """ alpha = evidence + 1.0 strength = alpha.sum(dim=-1) p = alpha / strength[:, None] err = (target - p) ** 2 var = p * (1 - p) / (strength[:, None] + 1) loss = (err + var).sum(dim=-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy ='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 + tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 + tmp1 tmp13 = tmp10 + tmp12 tmp14 = tmp2 / tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r3, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (16 * r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (1 + 16 * r0 + 64 * r2), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr2 + (2 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (3 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (1 + 4 * r3), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (4 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (5 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (6 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (7 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (2 + 4 * r3), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (8 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (9 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp53 = tl.load(in_ptr2 + (10 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (11 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp63 = tl.load(in_ptr0 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr1 + (3 + 4 * r3), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr2 + (12 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr2 + (13 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (14 + 16 * r0 + 64 * 
r2), None, eviction_policy='evict_last') tmp77 = tl.load(in_ptr2 + (15 + 16 * r0 + 64 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp6 = tmp1 * tmp5 tmp8 = tmp7 + tmp4 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp16 = tmp15 + tmp4 tmp17 = tmp14 + tmp16 tmp18 = tmp17 + tmp4 tmp19 = tmp6 / tmp18 tmp20 = tmp3 + tmp19 tmp23 = tmp21 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp4 - tmp22 tmp26 = tmp22 * tmp25 tmp28 = tmp27 + tmp4 tmp30 = tmp29 + tmp4 tmp31 = tmp28 + tmp30 tmp33 = tmp32 + tmp4 tmp34 = tmp31 + tmp33 tmp36 = tmp35 + tmp4 tmp37 = tmp34 + tmp36 tmp38 = tmp37 + tmp4 tmp39 = tmp26 / tmp38 tmp40 = tmp24 + tmp39 tmp41 = tmp20 + tmp40 tmp44 = tmp42 - tmp43 tmp45 = tmp44 * tmp44 tmp46 = tmp4 - tmp43 tmp47 = tmp43 * tmp46 tmp49 = tmp48 + tmp4 tmp51 = tmp50 + tmp4 tmp52 = tmp49 + tmp51 tmp54 = tmp53 + tmp4 tmp55 = tmp52 + tmp54 tmp57 = tmp56 + tmp4 tmp58 = tmp55 + tmp57 tmp59 = tmp58 + tmp4 tmp60 = tmp47 / tmp59 tmp61 = tmp45 + tmp60 tmp62 = tmp41 + tmp61 tmp65 = tmp63 - tmp64 tmp66 = tmp65 * tmp65 tmp67 = tmp4 - tmp64 tmp68 = tmp64 * tmp67 tmp70 = tmp69 + tmp4 tmp72 = tmp71 + tmp4 tmp73 = tmp70 + tmp72 tmp75 = tmp74 + tmp4 tmp76 = tmp73 + tmp75 tmp78 = tmp77 + tmp4 tmp79 = tmp76 + tmp78 tmp80 = tmp79 + tmp4 tmp81 = tmp68 / tmp80 tmp82 = tmp66 + tmp81 tmp83 = tmp62 + tmp82 tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK]) tmp86 = tl.sum(tmp84, 1)[:, None] tmp87 = 64.0 tmp88 = tmp86 / tmp87 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp88, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1[grid(1)](buf3, arg1_1, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf0 return buf3, class SquaredErrorBayesRiskNew(Module): """ <a id="SquaredErrorBayesRisk"></a> ## Bayes Risk with Squared Error Loss Here the cost function is squared error, $$\\sum_{k=1}^K (y_k - p_k)^2 = \\Vert \\mathbf{y} - \\mathbf{p} \\Vert_2^2$$ We integrate this cost over all $\\mathbf{p}$ \\begin{align} \\mathcal{L}(\\Theta) &= -\\log \\Bigg( \\int \\Big[ \\sum_{k=1}^K (y_k - p_k)^2 \\Big] \\frac{1}{B(\\textcolor{orange}{\\mathbf{\\alpha}})} \\prod_{k=1}^K p_k^{\\textcolor{orange}{\\alpha_k} - 1} d\\mathbf{p} \\Bigg ) \\ &= \\sum_{k=1}^K \\mathbb{E} \\Big[ y_k^2 -2 y_k p_k + p_k^2 \\Big] \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\end{align} Where $$\\mathbb{E}[p_k] = \\hat{p}_k = \\frac{\\textcolor{orange}{\\alpha_k}}{S}$$ is the expected probability when sampled from the Dirichlet distribution and $$\\mathbb{E}[p_k^2] = \\mathbb{E}[p_k]^2 + \\text{Var}(p_k)$$ where $$\\text{Var}(p_k) = \\frac{\\textcolor{orange}{\\alpha_k}(S - \\textcolor{orange}{\\alpha_k})}{S^2 (S + 1)} = \\frac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1}$$ is the variance.
This gives, \\begin{align} \\mathcal{L}(\\Theta) &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k^2] \\Big) \\ &= \\sum_{k=1}^K \\Big( y_k^2 -2 y_k \\mathbb{E}[p_k] + \\mathbb{E}[p_k]^2 + \\text{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( \\big( y_k -\\mathbb{E}[p_k] \\big)^2 + \\text{Var}(p_k) \\Big) \\ &= \\sum_{k=1}^K \\Big( ( y_k -\\hat{p}_k)^2 + \\frac{\\hat{p}_k(1 - \\hat{p}_k)}{S + 1} \\Big) \\end{align} This first part of the equation $\\big(y_k -\\mathbb{E}[p_k]\\big)^2$ is the error term and the second part is the variance. """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
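A numeric spot-check of the closed-form loss derived in the docstring, using the documented `[batch_size, n_classes]` shapes (the 4-D tensors from `get_inputs` exercise the kernels but not the documented broadcasting). It recomputes the error-plus-variance sum by hand and compares it against the eager `SquaredErrorBayesRisk` defined above:

import torch

torch.manual_seed(0)
loss_fn = SquaredErrorBayesRisk()
evidence = torch.rand(8, 4)                       # e >= 0, [batch, n_classes]
target = torch.eye(4)[torch.randint(0, 4, (8,))]  # one-hot targets y

alpha = evidence + 1.0
strength = alpha.sum(dim=-1, keepdim=True)        # S
p = alpha / strength                              # E[p_k] = alpha_k / S
manual = ((target - p) ** 2 + p * (1 - p) / (strength + 1)).sum(dim=-1).mean()
print(torch.allclose(loss_fn(evidence, target), manual))  # True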
techthiyanes/annotated_deep_learning_paper_implementations
SquaredErrorBayesRisk
false
16,587
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
series_decomp
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/te/ctewxgnp3lhk7tphupxjzgvltd6too7ssyeqjrw6g6jgcpfxzctd.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %arg0_1, %repeat_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (8*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + (4*((-1) + x1)) + (8*x2)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 4, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + (4 + x0 + (8*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pk/cpkn3bgnsy3dppoopoc6rp7olei4o245lri5pr5dgfnj4i7ikjmh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x_1 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze, [1, 4], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mu/cmuymhtkqbi67hzpmhp4twcizfguzwrjmcim5nvjf2ixscgodjht.py # Topologically Sorted Source Nodes: [res], Original ATen: [aten.sub] # Source node to ATen node mapping: # res => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %permute_1), kwargs = {}) triton_poi_fused_sub_2 = async_compile.triton('triton_poi_fused_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 8) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [res], Original ATen: [aten.sub] triton_poi_fused_sub_2.run(arg0_1, buf1, buf2, 32, grid=grid(32), stream=stream0) del arg0_1 return (buf2, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 2, 4), (8, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class moving_avg(nn.Module):
    """
    Moving average block to highlight the trend of time series
    """

    def __init__(self, kernel_size, stride):
        super(moving_avg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride,
            padding=0)

    def forward(self, x):
        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        x = torch.cat([front, x, end], dim=1)
        x = self.avg(x.permute(0, 2, 1))
        x = x.permute(0, 2, 1)
        return x


class series_decomp(nn.Module):
    """
    Series decomposition block
    """

    def __init__(self, kernel_size):
        super(series_decomp, self).__init__()
        self.moving_avg = moving_avg(kernel_size, stride=1)

    def forward(self, x):
        moving_mean = self.moving_avg(x)
        res = x - moving_mean
        return res, moving_mean


def get_inputs():
    return [torch.rand([4, 2, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
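For reference, a quick worked example of the padding arithmetic in moving_avg above (shapes taken from get_inputs(); the local variable names are illustrative, not part of the record). With an even kernel_size, the front/end padding of (kernel_size - 1) // 2 steps is one step shorter than the kernel_size - 1 positions the pooling window consumes, so the trend here has sequence length 1 and broadcasts in the subtraction:

import torch

kernel_size = 4
x = torch.rand(4, 2, 4)                     # (batch, seq_len=2, channels)
pad = (kernel_size - 1) // 2                # 1 step copied at each end
padded_len = x.shape[1] + 2 * pad           # 2 + 2 = 4
trend_len = padded_len - kernel_size + 1    # 4 - 4 + 1 = 1

ma = moving_avg(kernel_size, stride=1)
assert ma(x).shape == (4, trend_len, 4)     # torch.Size([4, 1, 4])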
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 8 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 8 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 4, tl.int64) tmp14 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused_avg_pool2d_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_sub_2[grid(32)](arg0_1, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del arg0_1 return buf2, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) class moving_avg(nn.Module): """ Moving average block to highlight the trend of time series """ def __init__(self, kernel_size, stride): super(moving_avg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x class series_decompNew(nn.Module): """ Series decomposition block """ def __init__(self, kernel_size): super(series_decompNew, self).__init__() self.moving_avg = moving_avg(kernel_size, stride=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
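A minimal smoke test (assuming a CUDA device, since call() allocates CUDA buffers) comparing the eager series_decomp with the Triton-backed series_decompNew; the residual plus the broadcast moving mean should also recover the input up to rounding:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 2, 4, device='cuda')
    res_ref, mean_ref = series_decomp(kernel_size=4)(x)
    res_new, mean_new = series_decompNew(kernel_size=4)(x)
    assert torch.allclose(res_ref, res_new, atol=1e-6)
    assert torch.allclose(mean_ref, mean_new, atol=1e-6)
    assert torch.allclose(res_new + mean_new, x)   # decomposition round-trips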
thuml/Autoformer
series_decomp
false
16,588
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
DilatedNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, fst], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # fst => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [2, 2], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ux/cuxgiwuszechecyc4fzfkfjhnrh56ejhadc6qhttydnrjl6eqczg.py # Topologically Sorted Source Nodes: [conv2d_3, fourth, add, add_1, add_2, add_3], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # conv2d_3 => convolution_3 # fourth => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [16, 16], [16, 16], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %relu), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %relu_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %relu_3), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp3 = tl.load(in_ptr2 + (x3), xmask) tmp5 = tl.load(in_ptr3 + (x3), xmask) tmp7 = tl.load(in_ptr4 + (x3), xmask) tmp8 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tmp6 + tmp11 tmp13 = 0.0 tmp14 = tmp11 <= tmp13 tl.store(out_ptr0 + (x3), tmp12, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, fst], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, snd], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, thrd], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(16, 16), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, fourth, add, add_1, add_2, add_3], Original ATen: 
[aten.convolution, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_threshold_backward_1.run(primals_3, buf1, buf3, buf5, buf6, primals_9, buf7, buf8, 256, grid=grid(256), stream=stream0) del buf6 del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn import functional as F
from torch import nn


class DilatedNet(nn.Module):

    def __init__(self, filters):
        super().__init__()
        self.filters = filters
        self.conv1 = nn.Conv2d(self.filters[-1], self.filters[-1], 3,
            padding=2, dilation=2)
        self.conv2 = nn.Conv2d(self.filters[-1], self.filters[-1], 3,
            padding=4, dilation=4)
        self.conv3 = nn.Conv2d(self.filters[-1], self.filters[-1], 3,
            padding=8, dilation=8)
        self.conv4 = nn.Conv2d(self.filters[-1], self.filters[-1], 3,
            padding=16, dilation=16)

    def forward(self, x):
        fst = F.relu(self.conv1(x))
        snd = F.relu(self.conv2(fst))
        thrd = F.relu(self.conv3(snd))
        fourth = F.relu(self.conv4(thrd))
        return x + fst + snd + thrd + fourth


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'filters': [4, 4]}]
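The padding=dilation choice keeps H and W fixed for a 3x3 kernel, while the receptive field of the four-layer stack grows to 1 + 2 * (2 + 4 + 8 + 16) = 61 pixels. A short sketch of that arithmetic (illustrative only, not part of the record):

# Each 3x3 conv with dilation d widens the receptive field by (3 - 1) * d.
kernel = 3
receptive_field = 1
for dilation in (2, 4, 8, 16):
    receptive_field += (kernel - 1) * dilation
print(receptive_field)  # 61

# Spatial size is preserved because out = in + 2*d - d*(kernel - 1) = in.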
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x3, xmask) tmp5 = tl.load(in_ptr3 + x3, xmask) tmp7 = tl.load(in_ptr4 + x3, xmask) tmp8 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tmp6 + tmp11 tmp13 = 0.0 tmp14 = tmp11 <= tmp13 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(16, 16), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)]( primals_3, buf1, buf3, buf5, buf6, primals_9, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8) class DilatedNetNew(nn.Module): def __init__(self, filters): super().__init__() self.filters = filters self.conv1 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=2, dilation=2) self.conv2 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=4, dilation=4) self.conv3 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=8, dilation=8) self.conv4 = nn.Conv2d(self.filters[-1], self.filters[-1], 3, padding=16, dilation=16) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
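A hedged equivalence check (CUDA assumed; DilatedNetNew reads its weights from the same conv1..conv4 attributes, so copying the state dict aligns the two models):

import torch

if torch.cuda.is_available():
    ref = DilatedNet([4, 4]).cuda()
    new = DilatedNetNew([4, 4]).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-5)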
tilacyn/dsb2018_topcoders
DilatedNet
false
16,589
[ "MIT" ]
413
e0f95ef70bc062d4dea321d2aa73231a9538cd63
https://github.com/tilacyn/dsb2018_topcoders/tree/e0f95ef70bc062d4dea321d2aa73231a9538cd63
my_Layernorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [x_hat], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_hat => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 
16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [x_hat], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_hat => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c6/cc6bquvrwn4byrcqha47sf3vkoxgdn67xthiyhl7b4736izdb4dj.py # Topologically Sorted Source Nodes: [bias, sub], Original ATen: [aten.repeat, aten.sub] # Source node to ATen node mapping: # bias => repeat # sub => sub_1 # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %repeat), kwargs = {}) triton_poi_fused_repeat_sub_2 = async_compile.triton('triton_poi_fused_repeat_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 
tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x_hat], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_hat], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias, sub], Original ATen: [aten.repeat, aten.sub] triton_poi_fused_repeat_sub_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class my_Layernorm(nn.Module):
    """
    Special designed layernorm for the seasonal part
    """

    def __init__(self, channels):
        super(my_Layernorm, self).__init__()
        self.layernorm = nn.LayerNorm(channels)

    def forward(self, x):
        x_hat = self.layernorm(x)
        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_hat - bias


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
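By construction the output of my_Layernorm has zero mean along the sequence dimension: the per-position mean of the normalized tensor is subtracted back out. A small sketch of that property (shapes from get_inputs() above):

import torch

norm = my_Layernorm(channels=4)
out = norm(torch.rand(4, 4, 4))             # (batch, seq_len, channels)
assert torch.allclose(out.mean(dim=1), torch.zeros(4, 4), atol=1e-6)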
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_repeat_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_repeat_sub_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3 class my_LayernormNew(nn.Module): """ Special designed layernorm for the seasonal part """ def __init__(self, channels): super(my_LayernormNew, self).__init__() self.layernorm = nn.LayerNorm(channels) def forward(self, input_0): primals_1 = self.layernorm.weight primals_2 = self.layernorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
thuml/Autoformer
my_Layernorm
false
16,590
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
Minibatch_stddev_layer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kd/ckdiefxtppglvtsgjgaajhzhyzrxhhjcgisaniawn5rbjhy5zgvd.py # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, y_4, y_5, y_6], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat] # Source node to ATen node mapping: # add => add # mean => mean # pow_1 => pow_1 # y_1 => sub # y_2 => mean_1 # y_3 => sqrt # y_4 => mean_2 # y_5 => mean_3 # y_6 => repeat # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [2, 3, 4], True), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [2]), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%mean_3, [4, 1, 4, 4]), kwargs = {}) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tmp29 = 1.0 tmp30 = tmp28 / tmp29 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, y_4, y_5, y_6], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat] stream0 = get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Minibatch_stddev_layer(nn.Module):
    """
    Minibatch standard deviation layer. (D_stylegan2)
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = x.view(group_size, -1, self.num_new_features,
            c // self.num_new_features, h, w)
        y = y - torch.mean(y, dim=0, keepdim=True)
        y = torch.mean(y ** 2, dim=0)
        y = torch.sqrt(y + 1e-08)
        y = torch.mean(y, dim=[2, 3, 4], keepdim=True)
        y = torch.mean(y, dim=2)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
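Since the batch of 4 forms a single group here, the appended channel collapses to one scalar: the mean over (c, h, w) of the per-position batch standard deviation. A sketch checking that reading (my derivation, not part of the record):

import torch

layer = Minibatch_stddev_layer()            # group_size=4, num_new_features=1
x = torch.rand(4, 4, 4, 4)
out = layer(x)
assert out.shape == (4, 5, 4, 4)            # one statistics channel appended

std = torch.sqrt(x.var(dim=0, unbiased=False) + 1e-08)   # (4, 4, 4)
assert torch.allclose(out[:, 4], std.mean().expand(4, 4, 4), atol=1e-6)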
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tmp29 = 1.0 tmp30 = tmp28 / tmp29 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp30, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf3, class Minibatch_stddev_layerNew(nn.Module): """ Minibatch standard deviation layer. (D_stylegan2) """ def __init__(self, group_size=4, num_new_features=1): super().__init__() self.group_size = group_size self.num_new_features = num_new_features def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
tomguluson92/StyleGAN2_PyTorch
Minibatch_stddev_layer
false
16,591
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
LearnedPositionalEmbeddings
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/26/c26shuqxjhfilaoens27tm25ij3y3e5ctdjjfr7fuwafpcfebmdi.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %select), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((5000, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class LearnedPositionalEmbeddings(Module):
    """
    <a id="LearnedPositionalEmbeddings"></a>

    ## Add parameterized positional encodings

    This adds learned positional embeddings to patch embeddings.
    """

    def __init__(self, d_model: 'int', max_len: 'int'=5000):
        """
        * `d_model` is the transformer embeddings size
        * `max_len` is the maximum number of patches
        """
        super().__init__()
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
            d_model), requires_grad=True)

    def forward(self, x: 'torch.Tensor'):
        """
        * `x` is the patch embeddings of shape `[patches, batch_size, d_model]`
        """
        pe = self.positional_encodings[x.shape[0]]
        return x + pe


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4}]
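A minimal usage sketch, assuming the definitions above and the shapes from get_inputs()/get_init_inputs(): note that indexing with x.shape[0] selects the single row positional_encodings[4] and broadcasts it over the input, which is exactly what the fused add kernel in this record reproduces.

import torch

emb = LearnedPositionalEmbeddings(d_model=4)   # positional_encodings: (5000, 1, 4)
x = torch.rand(4, 4, 4, 4)                     # the record's sample input
pe = emb.positional_encodings[x.shape[0]]      # row 4, shape (1, 4), broadcast on add
assert torch.equal(emb(x), x + pe)
assert emb(x).shape == x.shape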
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0,


class LearnedPositionalEmbeddingsNew(Module):
    """
    <a id="LearnedPositionalEmbeddings"></a>

    ## Add parameterized positional encodings

    This adds learned positional embeddings to patch embeddings.
    """

    def __init__(self, d_model: 'int', max_len: 'int'=5000):
        """
        * `d_model` is the transformer embeddings size
        * `max_len` is the maximum number of patches
        """
        super().__init__()
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
            d_model), requires_grad=True)

    def forward(self, input_0):
        primals_1 = self.positional_encodings
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
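A hedged reading of the kernel's index arithmetic: with positional_encodings contiguous at strides (4, 4, 1), the load at in_ptr1 + (16 + xindex % 4) is element [4, 0, xindex % 4], i.e. the same single row the eager forward picks when x.shape[0] == 4. A CPU restatement of that offset math:

import torch

pe = torch.randn(5000, 1, 4)
x = torch.rand(4, 4, 4, 4)
flat = pe.reshape(-1)            # strides (4, 4, 1) put pe[4, 0, :] at elements 16..19
assert torch.equal(x + pe[4], x + flat[16:20])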
techthiyanes/annotated_deep_learning_paper_implementations
LearnedPositionalEmbeddings
false
16,592
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
Aggregator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/to/ctoyzdhnandcesxf3taaspouwk6elycbpsvcjgls7pcwh2pzzjfg.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 64 x0 = xindex % 64 x2 = (xindex // 4096) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 
0.0625 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yv/cyvvzwqfsuedhtdcuo3yqbwxs7zit7ejzgfq3gembsjiztf3doaw.py # Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, None) tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 65536, grid=grid(65536), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 65536, grid=grid(65536), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torchvision.transforms.functional as F
from torch.nn import functional as F
from torch import nn


class Aggregator(nn.Module):

    def __init__(self, in_channels, mid_channels, upsample_factor):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=2 ** upsample_factor)
        self.conv = nn.Conv2d(in_channels, mid_channels, 3, padding=1)

    def forward(self, x):
        x = self.upsample(x)
        x = F.relu(self.conv(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'mid_channels': 4, 'upsample_factor': 4}]
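A quick shape sketch, assuming the module above with the record's init arguments: scale_factor = 2**4 = 16 turns the 4x4 input into 64x64 before the padded 3x3 convolution, which is why every intermediate buffer in the compiled wrapper is (4, 4, 64, 64).

import torch

agg = Aggregator(in_channels=4, mid_channels=4, upsample_factor=4)
y = agg(torch.rand(4, 4, 4, 4))   # nearest upsample x16, then conv3x3 + ReLU
assert y.shape == (4, 4, 64, 64)
assert (y >= 0).all()             # ReLU output is non-negative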
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 64 % 64
    x0 = xindex % 64
    x2 = xindex // 4096
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.0625
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), None,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
        in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(65536)](primals_1, buf0,
            65536, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_1[grid(65536)](
            buf2, primals_3, buf3, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf3


class AggregatorNew(nn.Module):

    def __init__(self, in_channels, mid_channels, upsample_factor):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=2 ** upsample_factor)
        self.conv = nn.Conv2d(in_channels, mid_channels, 3, padding=1)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
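In triton_poi_fused__unsafe_index_0 the constant 0.0625 is 1/16: each coordinate of the 64x64 output grid is scaled by it and truncated to an integer to pick the source pixel, i.e. nearest-neighbour upsampling by the 2**4 factor. A hedged eager restatement of the same index math:

import torch

x = torch.rand(4, 4, 4, 4)
idx = (torch.arange(64, dtype=torch.float32) * 0.0625).to(torch.int32).long()
up = x[:, :, idx][:, :, :, idx]   # same truncating index arithmetic as the kernel
assert torch.equal(up, torch.nn.Upsample(scale_factor=16)(x))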
tilacyn/dsb2018_topcoders
Aggregator
false
16,593
[ "MIT" ]
413
e0f95ef70bc062d4dea321d2aa73231a9538cd63
https://github.com/tilacyn/dsb2018_topcoders/tree/e0f95ef70bc062d4dea321d2aa73231a9538cd63
AdaptiveMaxPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ju/cjufs2s6kpppx2j7dlywrgo5ev3w5gafdyq6omiw6xrpch72eive.py # Topologically Sorted Source Nodes: [adaptive_max_pool2d, gt], Original ATen: [aten.adaptive_max_pool2d, aten.gt] # Source node to ATen node mapping: # adaptive_max_pool2d => adaptive_max_pool2d # gt => gt # Graph fragment: # %adaptive_max_pool2d : [num_users=2] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%getitem, 0), kwargs = {}) triton_poi_fused_adaptive_max_pool2d_gt_0 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_gt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_adaptive_max_pool2d_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jd/cjdkkc7dnkw7542bfmhd6kpfaq53672wcboiwj6vm5uysgviskdd.py # Topologically Sorted Source Nodes: [trace], Original ATen: [aten.index] # Source node to ATen node mapping: # trace => index # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%getitem_1]), kwargs = {}) triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = (xindex // 64) x4 = xindex % 1024 x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x5), tmp1, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (16, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [adaptive_max_pool2d, gt], Original ATen: [aten.adaptive_max_pool2d, aten.gt] stream0 = get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_gt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [trace], Original ATen: [aten.index] triton_poi_fused_index_1.run(arg0_1, arg1_1, buf1, 16384, grid=grid(16384), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((16, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class _SpikeAdaptiveMaxPoolNd(nn.Module):

    def __init__(self, output_size):
        super(_SpikeAdaptiveMaxPoolNd, self).__init__()
        self.output_size = output_size
        self.return_indices = True

    def reset_state(self):
        pass


class AdaptiveMaxPool2d(_SpikeAdaptiveMaxPoolNd):
    """Simple port of PyTorch AdaptiveMaxPool2d with small adjustment for
    spiking operations.

    Currently pooling only supports operations on floating point numbers,
    thus it casts the uint8 spikes to floats back and forth.

    The trace of the 'maximum' spike is also returned. In case of multiple
    spikes within pooling window, returns first spike of the window (top
    left corner).
    """

    def forward(self, x, trace):
        x, idx = F.adaptive_max_pool2d(x, self.output_size,
            self.return_indices)
        trace = trace[idx]
        return x > 0, trace


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([16, 4, 4, 4])]


def get_init_inputs():
    return [[], {'output_size': 4}]
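A small sketch, assuming the class above and the record's 4x4 inputs with output_size=4: the pooling is then the identity, so the boolean output reduces to x > 0, and the advanced indexing trace[idx] broadcasts the rank-4 index tensor over trace's remaining dims, producing the rank-7 (4, 4, 4, 4, 4, 4, 4) buffer seen in the compiled wrapper.

import torch

pool = AdaptiveMaxPool2d(output_size=4)
x, trace = torch.rand(4, 4, 4, 4), torch.rand(16, 4, 4, 4)
spikes, pooled_trace = pool(x, trace)
assert spikes.dtype == torch.bool and spikes.shape == (4, 4, 4, 4)
assert pooled_trace.shape == (4, 4, 4, 4, 4, 4, 4)   # idx.shape + trace[0].shape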
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_adaptive_max_pool2d_gt_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex // 64
    x4 = xindex % 1024
    x5 = xindex
    tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
    tl.store(out_ptr0 + x5, tmp1, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (16, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_adaptive_max_pool2d_gt_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256,
            64, 16, 4, 1), torch.float32)
        triton_poi_fused_index_1[grid(16384)](arg0_1, arg1_1, buf1, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0, buf1


class _SpikeAdaptiveMaxPoolNd(nn.Module):

    def __init__(self, output_size):
        super(_SpikeAdaptiveMaxPoolNd, self).__init__()
        self.output_size = output_size
        self.return_indices = True

    def reset_state(self):
        pass


class AdaptiveMaxPool2dNew(_SpikeAdaptiveMaxPoolNd):
    """Simple port of PyTorch AdaptiveMaxPool2d with small adjustment for
    spiking operations.

    Currently pooling only supports operations on floating point numbers,
    thus it casts the uint8 spikes to floats back and forth.

    The trace of the 'maximum' spike is also returned. In case of multiple
    spikes within pooling window, returns first spike of the window (top
    left corner).
    """

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0], output[1]
tomking/PySNN
AdaptiveMaxPool2d
false
16,594
[ "MIT" ]
175
c99ba6cd28a518dc07cab765acac9b69ac6fe36b
https://github.com/tomking/PySNN/tree/c99ba6cd28a518dc07cab765acac9b69ac6fe36b
TokenEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wf/cwfgpodivarq2gzz7nodlok35jybiut5djlrlgyaw23yzzih2tt7.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] # Source node to ATen node mapping: # pad => copy # Graph fragment: # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {}) triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = (yindex // 6) tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float("nan") tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + ((-20) + x2 + (4*y0) + (16*y1)), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + (4*y0) + (16*y1)), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + (6*x2) + (24*y1)), tmp42, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_copy_0.run(primals_1, buf1, 24, 4, grid=grid(24, 4), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == 
"__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class TokenEmbedding(nn.Module):

    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular',
            bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, x):
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'c_in': 4, 'd_model': 4}]
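A usage sketch, assuming the module above: the input is [batch, seq_len, c_in], which forward permutes to [batch, c_in, seq_len] for the circularly padded Conv1d and transposes back, so sequence length and feature count are preserved for the record's square shapes.

import torch

tok = TokenEmbedding(c_in=4, d_model=4)
x = torch.rand(4, 4, 4)        # [batch, seq_len, c_in]
y = tok(x)
assert y.shape == (4, 4, 4)    # [batch, seq_len, d_model]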
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 24
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    y0 = yindex % 6
    x2 = xindex
    y1 = yindex // 6
    tmp0 = y0
    tmp1 = tl.full([1, 1], 5, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
    tmp4 = tl.full([1, 1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tmp5 & tmp2
    tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
    tmp8 = tmp7 >= tmp4
    tmp9 = tmp7 < tmp1
    tmp10 = tmp8 & tmp9
    tmp11 = tmp10 & tmp6
    tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp13 = float('nan')
    tmp14 = tl.where(tmp10, tmp12, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp6, tmp14, tmp15)
    tmp17 = tmp3 >= tmp4
    tmp18 = tmp3 < tmp1
    tmp19 = tmp17 & tmp18
    tmp20 = tmp19 & tmp2
    tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp22 = tl.where(tmp19, tmp21, tmp13)
    tmp23 = tl.where(tmp5, tmp16, tmp22)
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp2, tmp23, tmp24)
    tmp26 = tmp0 < tmp4
    tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
    tmp28 = tmp27 >= tmp4
    tmp29 = tmp27 < tmp1
    tmp30 = tmp28 & tmp29
    tmp31 = tmp30 & tmp26
    tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp33 = tl.where(tmp30, tmp32, tmp13)
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp26, tmp33, tmp34)
    tmp36 = tmp0 >= tmp4
    tmp37 = tmp0 < tmp1
    tmp38 = tmp36 & tmp37
    tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask &
        ymask, eviction_policy='evict_last', other=0.0)
    tmp40 = tl.where(tmp38, tmp39, tmp13)
    tmp41 = tl.where(tmp26, tmp35, tmp40)
    tmp42 = tl.where(tmp2, tmp25, tmp41)
    tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
    return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1


class TokenEmbeddingNew(nn.Module):

    def __init__(self, c_in, d_model):
        super(TokenEmbeddingNew, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular',
            bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, input_0):
        primals_2 = self.tokenConv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
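The copy kernel above materialises the circular padding implied by padding_mode='circular': it writes a length-6 buffer (1 + 4 + 1) whose ends wrap around, after which the convolution runs with padding=(0,). A hedged eager equivalent of that padded buffer:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)                       # [batch, channels, time] after the permute
padded = F.pad(x, (1, 1), mode='circular')    # [..., t3 | t0 t1 t2 t3 | t0]
assert padded.shape == (4, 4, 6)
assert torch.equal(padded[..., 0], x[..., -1])
assert torch.equal(padded[..., -1], x[..., 0])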
thuml/Autoformer
TokenEmbedding
false
16,595
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
ActNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/n3/cn3wlek7bl5jirlp73pieiarvuadnzcb6uy5z3ieztq35hnq6trv.py # Topologically Sorted Source Nodes: [exp, mul, z], Original ATen: [aten.exp, aten.mul, aten.add] # Source node to ATen node mapping: # exp => exp # mul => mul # z => add # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_exp_mul_0 = async_compile.triton('triton_poi_fused_add_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hj/chjctndcct3uy5zpfj56qun4jpqjq3jp7qekxbult5tkzhebqoca.py # Topologically Sorted Source Nodes: [log_det], Original ATen: [aten.sum] # Source node to ATen node mapping: # log_det => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%primals_1,), kwargs = {}) triton_per_fused_sum_1 = async_compile.triton('triton_per_fused_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp, mul, z], Original ATen: [aten.exp, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_3 buf1 = empty_strided_cuda((), (), 
torch.float32) # Topologically Sorted Source Nodes: [log_det], Original ATen: [aten.sum] triton_per_fused_sum_1.run(primals_1, buf1, 1, 4, grid=grid(1), stream=stream0) return (buf0, buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ActNorm(nn.Module):
    """
    ActNorm layer.

    [Kingma and Dhariwal, 2018.]
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.mu = nn.Parameter(torch.zeros(dim, dtype=torch.float))
        self.log_sigma = nn.Parameter(torch.zeros(dim, dtype=torch.float))

    def forward(self, x):
        z = x * torch.exp(self.log_sigma) + self.mu
        log_det = torch.sum(self.log_sigma)
        return z, log_det

    def inverse(self, z):
        x = (z - self.mu) / torch.exp(self.log_sigma)
        log_det = -torch.sum(self.log_sigma)
        return x, log_det


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
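A round-trip sketch, assuming the class above: inverse undoes forward exactly and the two log-determinants cancel; with the zero-initialised parameters the layer starts as the identity with log_det == 0.

import torch

norm = ActNorm(dim=4)
x = torch.rand(4, 4, 4, 4)
z, log_det = norm(x)
x_rec, inv_log_det = norm.inverse(z)
assert torch.allclose(x, x_rec)
assert torch.isclose(log_det, -inv_log_det)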
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + x2, tmp5, xmask)


@triton.jit
def triton_per_fused_sum_1(in_ptr0, out_ptr0, xnumel, rnumel,
        XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf1 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_sum_1[grid(1)](primals_1, buf1, 1, 4, XBLOCK=1,
            num_warps=2, num_stages=1)
    return buf0, buf1, primals_1, primals_2


class ActNormNew(nn.Module):
    """
    ActNorm layer.

    [Kingma and Dhariwal, 2018.]
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.mu = nn.Parameter(torch.zeros(dim, dtype=torch.float))
        self.log_sigma = nn.Parameter(torch.zeros(dim, dtype=torch.float))

    def inverse(self, z):
        x = (z - self.mu) / torch.exp(self.log_sigma)
        log_det = -torch.sum(self.log_sigma)
        return x, log_det

    def forward(self, input_0):
        # Per the traced graph above, primals_1 feeds both exp() and the
        # log-det sum, and primals_3 is the additive term, so primals_1
        # must be log_sigma and primals_3 must be mu.
        primals_1 = self.log_sigma
        primals_3 = self.mu
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
tonyduan/hybrid-models
ActNorm
false
16,596
[ "MIT" ]
238
a29bff4756d8306cd24515f2fb825763a71c3d90
https://github.com/tonyduan/hybrid-models/tree/a29bff4756d8306cd24515f2fb825763a71c3d90
GatedMaskedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lk/clkvmfnc5ppj6ffcl2v3tvac6pbvz3y7mizzrdi65zokiejjhua3.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 5 x0 = xindex % 6 x2 = (xindex // 30) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-1) + x0 
tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pj/cpjc45ogatjtexy7be6gijiwluzdhthxjvcnnqmgqlinbpzrsmbi.py # Topologically Sorted Source Nodes: [conv2d, pad_1], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # conv2d => convolution # pad_1 => constant_pad_nd_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [0, 0, 1, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 5 x4 = (xindex // 20) x5 = xindex % 20 x2 = (xindex // 20) % 8 x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-4) + x5 + (16*x4)), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + (x2), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/es/ceshmhutmeo5xl2yl3dxwxrpje5njiwcfwwoj52wrkr3mcl5lhub.py # Topologically Sorted Source Nodes: [tanh, sigmoid, v_map_out], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # tanh => tanh # v_map_out => mul # Graph fragment: # %tanh : [num_users=1] = 
call_function[target=torch.ops.aten.tanh.default](args = (%slice_6,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_8,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (20*x1) + (160*x2)), xmask) tmp2 = tl.load(in_ptr0 + (80 + x0 + (20*x1) + (160*x2)), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4b/c4bwymcjfsik2yzrr5le4v2j3mroi4pfhlquggnmpgqwjxzshdbs.py # Topologically Sorted Source Nodes: [pad_2], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad_2 => constant_pad_nd_2 # Graph fragment: # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_6, [1, 0, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/md/cmdddio6ujcwbhy6hgzu6huywjuoh3klsp37w5ygk2v4gwwrdoev.py # Topologically Sorted Source Nodes: [tanh_1, sigmoid_1, h_out_2], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # h_out_2 => mul_1 # sigmoid_1 => sigmoid_1 # tanh_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%slice_10,), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_12,), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_1, %sigmoid_1), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_4 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, 
out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 64) x4 = xindex % 64 x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + (128*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + (128*x2)), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (64 + x4 + (128*x2)), xmask) tmp9 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (64 + x4 + (128*x2)), xmask) tmp12 = tl.load(in_ptr3 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp7 * tmp15 tl.store(out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr1 + (x3), tmp15, xmask) tl.store(out_ptr2 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ti/ctitm3fmik35mxgmabf5id22xrqrvhqyrxpmktt2s2eg77n2c7xt.py # Topologically Sorted Source Nodes: [h_map_out, h_map_out_1], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # h_map_out => convolution_3 # h_map_out_1 => add_1 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_1, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %primals_6), kwargs = {}) triton_poi_fused_add_convolution_5 = async_compile.triton('triton_poi_fused_add_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (8, 4, 1, 2), (8, 2, 2, 1)) assert_size_stride(primals_8, (8, ), (1, )) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 480, grid=grid(480), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 5, 4), (160, 20, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, pad_1], Original ATen: [aten.convolution, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_1.run(buf1, primals_3, buf2, 640, grid=grid(640), stream=stream0) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, sigmoid, v_map_out], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_tanh_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [vh], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 4, 4), (128, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_2], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_3.run(primals_6, buf5, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [h_out], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 4, 4), (128, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, sigmoid_1, h_out_2], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_tanh_4.run(buf6, primals_8, buf4, primals_5, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf4 del buf6 del primals_5 del primals_8 # Topologically Sorted 
Source Nodes: [h_map_out], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [h_map_out, h_map_out_1], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_5.run(buf11, primals_10, primals_6, 256, grid=grid(256), stream=stream0) del primals_10 del primals_6 return (buf3, buf11, primals_2, primals_4, primals_7, primals_9, buf0, reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), buf5, buf7, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 2, 3), (24, 6, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, 4, 1, 2), (8, 2, 2, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class GatedMaskedConv2d(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2d, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, v_map, h_map): v_out = self.v_pad2(self.v_conv(self.v_pad1(v_map)))[:, :, :-1, :] v_map_out = F.tanh(v_out[:, :self.dim]) * F.sigmoid(v_out[:, self.dim:] ) vh = self.vh_conv(v_out) h_out = self.h_conv(self.h_pad1(h_map)) if self.mask == 'A': h_out = self.h_pad2(h_out)[:, :, :, :-1] h_out = h_out + vh h_out = F.tanh(h_out[:, :self.dim]) * F.sigmoid(h_out[:, self.dim:]) h_map_out = self.h_conv_res(h_out) if self.mask == 'B': h_map_out = h_map_out + h_map return v_map_out, h_map_out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
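A minimal usage sketch of the eager module above, assuming the toy shapes from get_inputs() and get_init_inputs(); it only checks that both stacks preserve the 4x4x4x4 shape:

import torch

model = GatedMaskedConv2d(in_dim=4)  # defaults: out_dim=in_dim, kernel_size=3, mask='B'
v_map, h_map = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
v_map_out, h_map_out = model(v_map, h_map)
# The vertical stack sees only rows above each pixel, the horizontal stack only
# pixels to the left; with mask='B' the horizontal output also adds a residual h_map.
print(v_map_out.shape, h_map_out.shape)  # torch.Size([4, 4, 4, 4]) twice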
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 5 x0 = xindex % 6 x2 = xindex // 30 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x4 = xindex // 20 x5 = xindex % 20 x2 = xindex // 20 % 8 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + x2, tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp2, tmp5, tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 160 * x2), xmask) tmp2 = tl.load(in_ptr0 + (80 + x0 + 20 * x1 + 160 * x2), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x4 = xindex % 64 x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 128 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 128 * x2), xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (64 + x4 + 128 * x2), xmask) tmp9 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (64 + x4 + 128 * x2), xmask) tmp12 = tl.load(in_ptr3 + (4 + x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp7 * tmp15 tl.store(out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr1 + x3, tmp15, xmask) tl.store(out_ptr2 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_add_convolution_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 2, 3), (24, 6, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (8, 4, 1, 2), (8, 2, 2, 1)) assert_size_stride(primals_8, (8,), (1,)) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 5, 4), (160, 20, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(640)](buf1, primals_3, buf2, 640, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 4, 4), (128, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_constant_pad_nd_3[grid(320)](primals_6, buf5, 320, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 4, 4), (128, 16, 4, 1)) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_tanh_4[grid(256)](buf6, primals_8, buf4, primals_5, buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf6 del primals_5 del primals_8 buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_add_convolution_5[grid(256)](buf11, primals_10, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 del primals_6 return (buf3, buf11, primals_2, primals_4, primals_7, primals_9, buf0, reinterpret_tensor(buf2, (4, 8, 4, 4), (160, 20, 4, 1), 0), buf5, buf7, buf8, buf9) class GatedMaskedConv2dNew(nn.Module): def __init__(self, in_dim, out_dim=None, kernel_size=3, mask='B'): super(GatedMaskedConv2dNew, self).__init__() if out_dim is None: out_dim = in_dim self.dim = out_dim self.size = kernel_size self.mask = mask pad = self.size // 2 self.v_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size)) self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0) self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0) self.vh_conv = nn.Conv2d(2 * self.dim, 2 * self.dim, kernel_size=1) self.h_conv = nn.Conv2d(in_dim, 2 * self.dim, kernel_size=(1, pad + 1)) self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0) self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0) self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1) def forward(self, input_0, input_1): primals_2 = self.v_conv.weight primals_3 = self.v_conv.bias primals_4 = self.vh_conv.weight primals_5 = self.vh_conv.bias primals_7 = self.h_conv.weight primals_8 = self.h_conv.bias primals_9 = self.h_conv_res.weight primals_10 = self.h_conv_res.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1]
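A sanity-check sketch, assuming a CUDA device at index 0 (the generated call hard-codes cuda:0): once parameters are shared, the Inductor-compiled wrapper should reproduce the eager module up to floating-point tolerance.

import torch

eager = GatedMaskedConv2d(in_dim=4).cuda()
compiled = GatedMaskedConv2dNew(in_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights and biases

v = torch.rand(4, 4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, 4, device='cuda')
v_ref, h_ref = eager(v, h)
v_out, h_out = compiled(v, h)
assert torch.allclose(v_ref, v_out, atol=1e-5)
assert torch.allclose(h_ref, h_out, atol=1e-5)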
tom-pelsmaeker/vae-lagging-encoder
GatedMaskedConv2d
false
16,597
[ "MIT" ]
173
b190239019a94c85858d188a0853886eb48ce4be
https://github.com/tom-pelsmaeker/vae-lagging-encoder/tree/b190239019a94c85858d188a0853886eb48ce4be
MaxPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/el/celxe74tyyqfbi4jfjnsckgl5t4splv2pudvtrmxrgkiifbz7rkd.py # Topologically Sorted Source Nodes: [max_pool2d, gt, trace], Original ATen: [aten.max_pool2d_with_indices, aten.gt, aten.index] # Source node to ATen node mapping: # gt => gt # max_pool2d => _low_memory_max_pool2d_with_offsets # trace => index # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%getitem, 0), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%view, [%view_1]), kwargs = {}) triton_poi_fused_gt_index_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_gt_index_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_index_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gt_index_max_pool2d_with_indices_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 > tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, 
tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = 0.0 tmp78 = tmp30 > tmp77 tmp79 = tl.full([1], 4, tl.int32) tmp80 = tl.where((tmp76 < 0) != (tmp79 < 0), tl.where(tmp76 % tmp79 != 0, tmp76 // tmp79 - 1, tmp76 // tmp79), tmp76 // tmp79) tmp81 = tmp80 * tmp79 tmp82 = tmp76 - tmp81 tmp83 = tl.full([1], 0, tl.int64) tmp84 = tmp83 + tmp80 tmp85 = tmp83 + tmp82 tmp86 = tl.full([1], 4, tl.int64) tmp87 = tmp84 * tmp86 tmp88 = tmp87 + tmp85 tmp89 = tl.full([XBLOCK], 256, tl.int32) tmp90 = tmp88 + tmp89 tmp91 = tmp88 < 0 tmp92 = tl.where(tmp91, tmp90, tmp88) tl.device_assert(((0 <= tmp92) & (tmp92 < 256)) | ~(xmask), "index out of bounds: 0 <= tmp92 < 256") tmp94 = tl.load(in_ptr1 + (tmp92), xmask, eviction_policy='evict_last') tl.store(out_ptr2 + (x0), tmp78, xmask) tl.store(out_ptr3 + (x0), tmp94, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) buf3 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d, gt, trace], Original ATen: [aten.max_pool2d_with_indices, aten.gt, aten.index] stream0 = get_raw_stream(0) triton_poi_fused_gt_index_max_pool2d_with_indices_0.run(arg0_1, arg1_1, buf2, buf3, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 return (buf2, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class _SpikeMaxPoolNd(nn.Module): def __init__(self, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False): super(_SpikeMaxPoolNd, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation self.ceil_mode = ceil_mode self.return_indices = True def reset_state(self): pass class MaxPool2d(_SpikeMaxPoolNd): """Simple port of PyTorch MaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, x, trace): x = x x, idx = F.max_pool2d(x, self.kernel_size, self.stride, self. padding, self.dilation, self.ceil_mode, self.return_indices) trace = trace.view(-1)[idx.view(-1)] trace = trace.view(idx.shape) return x > 0, trace def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
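A minimal usage sketch of the spiking pool above, assuming the kernel_size=4 setup from get_init_inputs(): with a 4x4 map and stride equal to the kernel size, each channel collapses to a single window, so the boolean output flags whether any spike survived pooling while the matching trace entry is gathered through the argmax index.

import torch

pool = MaxPool2d(kernel_size=4)
spikes = (torch.rand(4, 4, 4, 4) > 0.5).float()  # float spikes, as the docstring notes
trace = torch.rand(4, 4, 4, 4)
out, out_trace = pool(spikes, trace)
print(out.shape, out.dtype)   # torch.Size([4, 4, 1, 1]) torch.bool
print(out_trace.shape)        # torch.Size([4, 4, 1, 1])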
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gt_index_max_pool2d_with_indices_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 > tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = 
tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = 0.0 tmp78 = tmp30 > tmp77 tmp79 = tl.full([1], 4, tl.int32) tmp80 = tl.where((tmp76 < 0) != (tmp79 < 0), tl.where(tmp76 % tmp79 != 0, tmp76 // tmp79 - 1, tmp76 // tmp79), tmp76 // tmp79) tmp81 = tmp80 * tmp79 tmp82 = tmp76 - tmp81 tmp83 = tl.full([1], 0, tl.int64) tmp84 = tmp83 + tmp80 tmp85 = tmp83 + tmp82 tmp86 = tl.full([1], 4, tl.int64) tmp87 = tmp84 * tmp86 tmp88 = tmp87 + tmp85 tmp89 = tl.full([XBLOCK], 256, tl.int32) tmp90 = tmp88 + tmp89 tmp91 = tmp88 < 0 tmp92 = tl.where(tmp91, tmp90, tmp88) tl.device_assert((0 <= tmp92) & (tmp92 < 256) | ~xmask, 'index out of bounds: 0 <= tmp92 < 256') tmp94 = tl.load(in_ptr1 + tmp92, xmask, eviction_policy='evict_last') tl.store(out_ptr2 + x0, tmp78, xmask) tl.store(out_ptr3 + x0, tmp94, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) buf3 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_gt_index_max_pool2d_with_indices_0[grid(16)](arg0_1, arg1_1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf2, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0) class _SpikeMaxPoolNd(nn.Module): def __init__(self, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False): super(_SpikeMaxPoolNd, self).__init__() self.kernel_size = kernel_size self.stride = stride or kernel_size self.padding = padding self.dilation = dilation self.ceil_mode = ceil_mode self.return_indices = True def reset_state(self): pass class MaxPool2dNew(_SpikeMaxPoolNd): """Simple port of PyTorch MaxPool2d with small adjustment for spiking operations. Currently pooling only supports operations on floating point numbers, thus it casts the uint8 spikes to floats back and forth. The trace of the 'maximum' spike is also returned. In case of multiple spikes within pooling window, returns first spike of the window (top left corner). """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
tomking/PySNN
MaxPool2d
false
16,598
[ "MIT" ]
175
c99ba6cd28a518dc07cab765acac9b69ac6fe36b
https://github.com/tomking/PySNN/tree/c99ba6cd28a518dc07cab765acac9b69ac6fe36b
DisAlignFastRCNNOutputLayers
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jt/cjt3jbpkn43oz2wvjw7bykbd6f6up6xa6tqayp3o3kx3seitgexv.py # Topologically Sorted Source Nodes: [aligned_scores], Original ATen: [aten.cat] # Source node to ATen node mapping: # aligned_scores => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %view], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + ((5*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr3 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tmp6 * tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp7 tmp16 = tmp12 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tl.load(in_ptr1 + (4 + (5*x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.where(tmp4, tmp18, tmp22) tl.store(out_ptr0 + (x2), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (5, ), (1, )) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 5), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [aligned_scores], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf2, buf0, primals_6, primals_7, buf3, 20, grid=grid(20), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proposal_deltas], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 return (buf3, buf4, primals_1, primals_6, primals_7, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = 
lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) class DisAlignFastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground classes cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignFastRCNNOutputLayers, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: nn.init.constant_(layer.bias, 0) def forward(self, x): if x.dim() > 2: x = torch.flatten(x, start_dim=1) scores = self.cls_score(x) confidence = self.confidence_layer(x).sigmoid() scores_tmp = confidence * (scores[:, :-1] * self.logit_scale + self .logit_bias) scores_tmp = scores_tmp + (1 - confidence) * scores[:, :-1] aligned_scores = cat([scores_tmp, scores[:, -1].view(-1, 1)], dim=1) proposal_deltas = self.bbox_pred(x) return aligned_scores, proposal_deltas def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4} ]
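A minimal usage sketch of the eager head above, assuming the toy sizes from get_init_inputs() (input_size=4, num_classes=4, and a truthy cls_agnostic_bbox_reg, which selects a single class-agnostic box):

import torch

head = DisAlignFastRCNNOutputLayers(input_size=4, num_classes=4, cls_agnostic_bbox_reg=4)
x = torch.rand(4, 4)
aligned_scores, proposal_deltas = head(x)
print(aligned_scores.shape)   # torch.Size([4, 5]): 4 calibrated foreground logits + raw background
print(proposal_deltas.shape)  # torch.Size([4, 4]): one class-agnostic box delta per proposal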
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (5 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tmp6 * tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp6 tmp15 = tmp14 * tmp7 tmp16 = tmp12 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp22 = tl.load(in_ptr1 + (4 + 5 * x1), tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tl.where(tmp4, tmp18, tmp22) tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (5,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 5), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(20)](buf2, buf0, primals_6, primals_7, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor( primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 return buf3, buf4, primals_1, primals_6, primals_7, buf0, buf2 def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) class DisAlignFastRCNNOutputLayersNew(nn.Module): """ Two linear layers for predicting Fast 
R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground classes cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignFastRCNNOutputLayersNew, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: nn.init.constant_(layer.bias, 0) def forward(self, input_0): primals_6 = self.logit_scale primals_7 = self.logit_bias primals_2 = self.cls_score.weight primals_3 = self.cls_score.bias primals_1 = self.bbox_pred.weight primals_9 = self.bbox_pred.bias primals_4 = self.confidence_layer.weight primals_5 = self.confidence_layer.bias primals_8 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
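For orientation, the fused kernel `triton_poi_fused_cat_0` above performs the DisAlign score calibration in one pass: each foreground logit is blended between its calibrated value and its raw value by a learned confidence gate, while the background logit passes through unchanged. A minimal eager-mode sketch of the same computation (the function and tensor names here are illustrative, not part of the generated code):

import torch

def disalign_calibrate(scores, confidence, logit_scale, logit_bias):
    # scores: (N, C + 1) classification logits; the last column is background
    # confidence: (N, 1) logits from the confidence_layer
    sigma = torch.sigmoid(confidence)
    fg = scores[:, :-1]
    calibrated = sigma * (fg * logit_scale + logit_bias) + (1.0 - sigma) * fg
    return torch.cat([calibrated, scores[:, -1:]], dim=1)  # background untouched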
tonysy/cvpods
DisAlignFastRCNNOutputLayers
false
16599
[ "Apache-2.0" ]
548
e322d7842ca0e34b1ef6237ea6d350633efc793a
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
RNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nc/cnc6a3vkphurm472zdavmn3qnff4lmaezxs63jlllw2kks2e62a4.py # Topologically Sorted Source Nodes: [input_combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_combined => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ih/cihkol22ojnmrk724q4odcm6ilz575wmbnulie74gzdcgue24tib.py # Topologically Sorted Source Nodes: [output_combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # output_combined => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm, %addmm_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gd/cgdq755g3clp3t5icrbudwx4ir4xygtoz6ug4jo2euegtyg5mdnp.py # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # output_3 => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j5/cj5hwj7ockkcleq56wmrpwxavcu7lllqodtnsxnd6sbzznn7lu6j.py # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # output_3 => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 
1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 12), (12, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [input_combined], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4) # alias # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0) # alias # Topologically Sorted Source Nodes: [output_combined], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf3, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, 
out=buf5) del primals_9 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_3.run(buf6, buf7, 16, grid=grid(16), stream=stream0) del buf6 return (buf7, buf1, buf0, buf4, buf7, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
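Kernels `triton_poi_fused__log_softmax_2` and `triton_poi_fused__log_softmax_3` above split a numerically stable log-softmax into two passes: first subtract the per-row maximum, then subtract the log of the summed exponentials. A small eager-mode check of that decomposition (illustrative only):

import torch

x = torch.randn(4, 4)
shifted = x - x.amax(dim=1, keepdim=True)                      # kernel _2
out = shifted - shifted.exp().sum(dim=1, keepdim=True).log()   # kernel _3
assert torch.allclose(out, torch.log_softmax(x, dim=1), atol=1e-6)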
import torch import torch.nn as nn from torch.autograd import Variable class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters): super(RNN, self).__init__() self.hidden_size = hidden_size self.all_categories = all_categories self.n_categories = n_categories self.all_letters = all_letters self.n_letters = n_letters self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.dropout = nn.Dropout(0.1) self.softmax = nn.LogSoftmax(dim=1) def forward(self, category, input_tensor, hidden): input_combined = torch.cat((category, input_tensor, hidden), 1) hidden = self.i2h(input_combined) output = self.i2o(input_combined) output_combined = torch.cat((hidden, output), 1) output = self.o2o(output_combined) output = self.dropout(output) output = self.softmax(output) return output, hidden def init_hidden(self): return Variable(torch.zeros(1, self.hidden_size)) @staticmethod def gen_input_tensor(all_letters, n_letters, line): tensor = torch.zeros(len(line), 1, n_letters) for li in range(len(line)): letter = line[li] tensor[li][0][all_letters.find(letter)] = 1 return tensor @staticmethod def gen_category_tensor(all_categories, n_categories, category): li = all_categories.index(category) tensor = torch.zeros(1, n_categories) tensor[0][li] = 1 return tensor def sample(self, category, start_letter='A'): category_tensor = Variable(self.gen_category_tensor(self. all_categories, self.n_categories, category)) input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) hidden = self.init_hidden() output_name = start_letter max_length = 20 for i in range(max_length): output, hidden = self.forward(category_tensor, input_tensor[0], hidden) _topv, topi = output.data.topk(1) topi = topi[0][0] if topi == self.n_letters - 1: break else: letter = self.all_letters[topi] output_name += letter input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) return output_name def samples(self, category, start_letters='ABC'): for start_letter in start_letters: yield self.sample(category, start_letter) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4, 'all_categories': 4, 'n_categories': 4, 'all_letters': 4, 'n_letters': 4}]
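As a quick smoke test of the module above, the two helper functions can be wired together directly; calling `eval()` first makes dropout a no-op so the run is deterministic (illustrative only):

import torch

init_args, init_kwargs = get_init_inputs()
rnn = RNN(*init_args, **init_kwargs).eval()  # eval() disables dropout
category, letter, hidden = get_inputs()
output, hidden = rnn(category, letter, hidden)
print(output.shape, hidden.shape)  # torch.Size([4, 4]) torch.Size([4, 4])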
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 12), (12, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4) extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0) triton_poi_fused_cat_1[grid(16)](buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__log_softmax_3[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 return buf7, buf1, buf0, buf4, buf7, primals_8 class RNNNew(nn.Module): def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters): super(RNNNew, self).__init__() self.hidden_size = hidden_size self.all_categories = all_categories self.n_categories = n_categories self.all_letters = all_letters self.n_letters = n_letters self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.dropout = nn.Dropout(0.1) self.softmax = nn.LogSoftmax(dim=1) def init_hidden(self): return Variable(torch.zeros(1, self.hidden_size)) @staticmethod def gen_input_tensor(all_letters, n_letters, line): tensor = torch.zeros(len(line), 1, n_letters) for li in range(len(line)): letter = line[li] tensor[li][0][all_letters.find(letter)] = 1 
return tensor @staticmethod def gen_category_tensor(all_categories, n_categories, category): li = all_categories.index(category) tensor = torch.zeros(1, n_categories) tensor[0][li] = 1 return tensor def sample(self, category, start_letter='A'): category_tensor = Variable(self.gen_category_tensor(self. all_categories, self.n_categories, category)) input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) hidden = self.init_hidden() output_name = start_letter max_length = 20 for i in range(max_length): output, hidden = self.forward(category_tensor, input_tensor[0], hidden) _topv, topi = output.data.topk(1) topi = topi[0][0] if topi == self.n_letters - 1: break else: letter = self.all_letters[topi] output_name += letter input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) return output_name def samples(self, category, start_letters='ABC'): for start_letter in start_letters: yield self.sample(category, start_letter) def forward(self, input_0, input_1, input_2): primals_4 = self.i2h.weight primals_5 = self.i2h.bias primals_6 = self.i2o.weight primals_7 = self.i2o.bias primals_8 = self.o2o.weight primals_9 = self.o2o.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
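Note that the compiled graph above contains no dropout op, which is consistent with an eval-mode trace (dropout is the identity in eval). A hedged parity check between the eager and compiled variants, assuming a CUDA device and that both classes plus `get_inputs` are in scope:

import torch

eager = RNN(4, 4, 4, 4, 4, 4, 4).cuda().eval()
compiled = RNNNew(4, 4, 4, 4, 4, 4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical weights
args = [t.cuda() for t in get_inputs()]
out_e, hid_e = eager(*args)
out_c, hid_c = compiled(*args)
assert torch.allclose(out_e, out_c, atol=1e-5)
assert torch.allclose(hid_e, hid_c, atol=1e-5)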
tom-kuchler/vhive
RNN
false
16600
[ "MIT" ]
138
ae1f2f5920e7607e9902ed1060bda62b56e332ac
https://github.com/tom-kuchler/vhive/tree/ae1f2f5920e7607e9902ed1060bda62b56e332ac
Upsample2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/s5/cs53ve6ff5smmuu5zxn6uwnvgsi32uuuvewmis3iy7oejdcvopvs.py # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # y_5 => constant_pad_nd_1 # Graph fragment: # %constant_pad_nd_1 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view_2, [0, 0, 1, 0, 1, 0, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 72) % 2 x1 = (xindex // 8) % 9 x0 = xindex % 8 x3 = (xindex // 144) x7 = xindex tmp0 = (-1) + x2 tmp1 = tl.full([1], 0, 
tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-1) + x1 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (((-1) + x1) % 2) tmp7 = tmp6 >= tmp1 tmp8 = (-1) + (x0 % 2) tmp9 = tmp8 >= tmp1 tmp10 = tmp7 & tmp9 tmp11 = tmp10 & tmp5 tmp12 = tl.load(in_ptr0 + ((4*((((-1) + x1) // 2) % 4)) + (16*x3) + (x0 // 2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + (x7), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qb/cqbibvks2cjhauc3nur2g3ih7ql3ix5qhuh7srxpiit2lwdgiiiw.py # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y_9 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %arg1_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 8], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = (yindex // 8) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (144*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (8*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zd/czdxbktkc7fglcerxemcjxwvuh5ktql4jw5x5gevz5yr4c7hicmg.py # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # y_11 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, 
convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_5, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.625), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_4, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_4, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_4, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_4, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 4, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + (5*tmp13) + (25*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + (5*tmp13) + (25*x2)), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + (5*tmp9) + (25*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + (5*tmp9) + (25*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 
tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + (x3), tmp44, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 2304, grid=grid(2304), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf0, buf1, 128, 8, grid=grid(128, 8), stream=stream0) del buf0 # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1)) del arg1_1 buf3 = reinterpret_tensor(buf1, (16, 1, 8, 8), (64, 1024, 8, 1), 0); del buf1 # reuse buf5 = buf3; del buf3 # reuse buf6 = reinterpret_tensor(buf5, (16, 1, 8, 8), (64, 1, 8, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2.run(buf6, buf2, 1024, grid=grid(1024), stream=stream0) del buf2 return (reinterpret_tensor(buf6, (4, 4, 8, 8), (256, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class Upsample2d(nn.Module): def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1): """ Upsample2d method in G_synthesis_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. :param factor: Integer upsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be an integer >= 1 (default: 2)' self.gain = gain self.factor = factor self.opts = opts self.k = _setup_kernel(k) * (self.gain * factor ** 2) self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = torch.flip(self.k, [2, 3]) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[0] - self.factor self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1 ) // 2 + factor - 1 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] self.down = down def forward(self, x): y = x.clone() y = y.reshape([-1, x.shape[2], x.shape[3], 1]) inC, inH, inW = x.shape[1:] y = torch.reshape(y, (-1, inH, 1, inW, 1, 1)) y = F.pad(y, (0, 0, self.factor - 1, 0, 0, 0, self.factor - 1, 0, 0, 0, 0, 0)) y = torch.reshape(y, (-1, 1, inH * self.factor, inW * self.factor)) y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max( self.padx0, 0), max(self.padx1, 0), 0, 0)) y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max( -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :] y = y.permute(0, 3, 1, 2) y = y.reshape(-1, 1, inH * self.factor + self.pady0 + self.pady1, inW * self.factor + self.padx0 + self.padx1) y = F.conv2d(y, self.k) y = y.view(-1, 1, inH * self.factor + self.pady0 + self.pady1 - self.kernelH + 1, inW * self.factor + self.padx0 + self.padx1 - self.kernelW + 1) if inH * self.factor != y.shape[1]: y = F.interpolate(y, size=(inH * self.factor, inW * self.factor ), mode='bilinear') y = y.permute(0, 2, 3, 1) y = y.reshape(-1, inC, inH * self.factor, inW * self.factor) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opts': _mock_config()}]
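As a worked example of `_setup_kernel` above: with the default k=[1, 3, 3, 1] the separable filter becomes a normalized 4x4 kernel, which the module then scales by gain * factor ** 2 so that the filter sums to 4 and compensates for the zeros inserted during upsampling (values below are illustrative):

import numpy as np

k = np.outer([1, 3, 3, 1], [1, 3, 3, 1]).astype(np.float32)
k /= k.sum()                  # normalized FIR filter, sum ~1.0
k_scaled = k * (1 * 2 ** 2)   # gain=1, factor=2
print(k.sum(), k_scaled.sum())  # ~1.0 ~4.0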
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 72 % 2 x1 = xindex // 8 % 9 x0 = xindex % 8 x3 = xindex // 144 x7 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x1 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = -1 + (-1 + x1) % 2 tmp7 = tmp6 >= tmp1 tmp8 = -1 + x0 % 2 tmp9 = tmp8 >= tmp1 tmp10 = tmp7 & tmp9 tmp11 = tmp10 & tmp5 tmp12 = tl.load(in_ptr0 + (4 * ((-1 + x1) // 2 % 4) + 16 * x3 + x0 // 2 ), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + x7, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 144 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 4, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 5 * tmp13 + 25 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 5 * tmp13 + 25 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = 1.0 tmp30 = triton_helpers.minimum(tmp28, tmp29) tmp31 = tmp25 * tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 5 * tmp9 + 25 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 5 * tmp9 + 25 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp30 tmp36 = tmp32 + tmp35 tmp37 = tmp24 + tmp31 tmp38 = tmp37 - tmp36 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = 
triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp29) tmp43 = tmp38 * tmp42 tmp44 = tmp36 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(2304)](arg0_1, buf0, 2304, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32) triton_poi_fused_convolution_1[grid(128, 8)](buf0, buf1, 128, 8, XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1)) del arg1_1 buf3 = reinterpret_tensor(buf1, (16, 1, 8, 8), (64, 1024, 8, 1), 0) del buf1 buf5 = buf3 del buf3 buf6 = reinterpret_tensor(buf5, (16, 1, 8, 8), (64, 1, 8, 1), 0) del buf5 triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_2[grid (1024)](buf6, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf2 return reinterpret_tensor(buf6, (4, 4, 8, 8), (256, 64, 8, 1), 0), def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class Upsample2dNew(nn.Module): def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1): """ Upsample2d method in G_synthesis_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. :param factor: Integer upsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be an integer >= 1 (default: 2)' self.gain = gain self.factor = factor self.opts = opts self.k = _setup_kernel(k) * (self.gain * factor ** 2) self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = torch.flip(self.k, [2, 3]) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[0] - self.factor self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1 ) // 2 + factor - 1 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] self.down = down def forward(self, input_0): arg1_1 = self.k arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
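The last kernel above is the fused counterpart of the `F.interpolate(..., mode='bilinear')` fallback in the original forward; a rough eager-mode equivalent using the shapes asserted in `call` (illustrative only):

import torch
import torch.nn.functional as F

y = torch.randn(16, 1, 5, 5, device='cuda')  # the shape asserted for buf2
out = F.interpolate(y, size=(8, 8), mode='bilinear', align_corners=False)
# the 0.625 constant in the kernel is the source/target ratio 5 / 8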
tomguluson92/StyleGAN2_PyTorch
Upsample2d
false
16601
[ "MIT" ]
89
4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
https://github.com/tomguluson92/StyleGAN2_PyTorch/tree/4ab7354c85cb986d2b77f5238c4a18c5efd1db1b
OptimizedResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bm/cbmrhif3lbp3fphqot3udpfwjleoia3nr4y44mr3ouepnl6xvum6.py # Topologically Sorted Source Nodes: [add, add_1, add_2, output], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # output => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_12), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_16), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {}) triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wr/cwrnvbcrqbwio7a7rmvbucor4fxeporot7ddnzmcwkeqmvvr3dnk.py # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # output_2 => convolution_1 # output_3 => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dh/cdhl7radi5y64ogaso4om5izyjakfvfpac7ljxek3l6lxikmcuzq.py # Topologically Sorted Source Nodes: [add_3, add_4, add_5, output_5], 
Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # output_5 => div_1 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_20, %slice_24), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %slice_28), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %slice_32), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_5, 4), kwargs = {}) triton_poi_fused_add_div_2 = async_compile.triton('triton_poi_fused_add_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4q/c4qhi5mwosilbpvrh52quhx24qbjckg4ntwd5hzhkozvrvrpmofd.py # Topologically Sorted Source Nodes: [output_1, add_3, add_4, add_5, output_5, add_6], Original ATen: [aten.convolution, aten.add, aten.div] # Source node to ATen node mapping: # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # output_1 => convolution # output_5 => div_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_3 : 
[num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_20, %slice_24), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %slice_28), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %slice_32), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_5, 4), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %div_1), kwargs = {}) triton_poi_fused_add_convolution_div_3 = async_compile.triton('triton_poi_fused_add_convolution_div_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_div_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 x4 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, add_2, output], Original ATen: [aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) # Topologically 
Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 144, grid=grid(144), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1)) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [add_3, add_4, add_5, output_5], Original ATen: [aten.add, aten.div] triton_poi_fused_add_div_2.run(buf4, primals_7, buf5, 16, grid=grid(16), stream=stream0) del buf4 del primals_7 buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_1, add_3, add_4, add_5, output_5, add_6], Original ATen: [aten.convolution, aten.add, aten.div] triton_poi_fused_add_convolution_div_3.run(buf6, primals_3, buf5, 64, grid=grid(64), stream=stream0) del buf5 del primals_3 return (buf6, primals_1, primals_2, primals_4, primals_6, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.utils as utils class CustomConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True, spectral_norm=False, residual_init=True): super(CustomConv2d, self).__init__() self.residual_init = residual_init if padding is None: padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) if spectral_norm: self.conv = utils.spectral_norm(self.conv) def forward(self, input): return self.conv(input) class ConvMeanPool(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(ConvMeanPool, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = self.conv(output) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(MeanPoolConv, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class OptimizedResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, spectral_norm=False): super(OptimizedResidualBlock, self).__init__() self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv_shortcut = MeanPoolConv(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.relu2 = nn.ReLU() def forward(self, input): shortcut = self.conv_shortcut(input) output = input output = self.conv1(output) output = self.relu2(output) output = self.conv2(output) return shortcut + output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
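The four-slice average in ConvMeanPool and MeanPoolConv is plain 2x2 mean pooling, which is exactly what the fused triton_poi_fused_add_div_0 kernel above computes. A minimal equivalence check (a sketch; nn.AvgPool2d is a standard stand-in introduced here for illustration, not something the record uses):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)  # same shape as get_inputs()

# Four-slice average over each 2x2 window, as in ConvMeanPool/MeanPoolConv.
pooled = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2] +
          x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4

# The same reduction via the standard pooling operator.
assert torch.allclose(pooled, nn.AvgPool2d(2)(x), atol=1e-6)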
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.utils as utils from torchvision import utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 0.25 tmp13 = tmp11 * tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_convolution_div_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x4 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(144)](buf3, primals_5, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1)) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused_add_div_2[grid(16)](buf4, primals_7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del primals_7 buf6 = buf1 del buf1 triton_poi_fused_add_convolution_div_3[grid(64)](buf6, primals_3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf5 del primals_3 return buf6, primals_1, primals_2, primals_4, primals_6, buf0, buf3 class CustomConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None, bias=True, spectral_norm=False, residual_init=True): super(CustomConv2d, self).__init__() self.residual_init = residual_init if padding is None: padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) if spectral_norm: self.conv = utils.spectral_norm(self.conv) def forward(self, input): return self.conv(input) class ConvMeanPool(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(ConvMeanPool, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = self.conv(output) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True, spectral_norm=False, residual_init=True): super(MeanPoolConv, self).__init__() self.conv = CustomConv2d(in_channels, out_channels, kernel_size, bias=bias, spectral_norm=spectral_norm, residual_init=residual_init ) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class OptimizedResidualBlockNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, spectral_norm=False): super(OptimizedResidualBlockNew, self).__init__() self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size= kernel_size, spectral_norm=spectral_norm) self.conv_shortcut = MeanPoolConv(in_channels, out_channels, kernel_size=1, spectral_norm=spectral_norm, residual_init=False) self.relu2 = nn.ReLU() def forward(self, 
input_0): primals_1 = input_0 primals_2 = self.conv_shortcut.conv.conv.weight primals_3 = self.conv_shortcut.conv.conv.bias primals_4 = self.conv1.conv.weight primals_5 = self.conv1.conv.bias primals_6 = self.conv2.conv.conv.weight primals_7 = self.conv2.conv.conv.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
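The parameter mapping above follows the usage inside call(): primals_1 is pooled and convolved as the input, primals_2/primals_3 are the 1x1 shortcut weight and bias, primals_4/primals_5 belong to conv1, and primals_6/primals_7 to conv2. With that mapping, the rewritten module can be checked against the eager definition on the record's input shape. A hypothetical test harness (assumes a CUDA device, since call() pins all buffers to cuda:0, and leaves spectral_norm at its default of False):

import torch

eager = OptimizedResidualBlock(in_channels=4, out_channels=4, kernel_size=4).cuda()
compiled = OptimizedResidualBlockNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical module structure, so keys match

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # Outputs should agree up to float32 reduction-order differences.
    print(torch.allclose(eager(x), compiled(x), atol=1e-5))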
takuhirok/rGAN
OptimizedResidualBlock
false
16,602
[ "MIT" ]
103
6f7a092de5814c662fd17224b3d48bebe7e03c2f
https://github.com/takuhirok/rGAN/tree/6f7a092de5814c662fd17224b3d48bebe7e03c2f
WassersteinGeneratorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ki/ckivxu6u7mrscvnpecxlyyzlxm7gqbwpixyjztmguny3ofhbig7t.py # Topologically Sorted Source Nodes: [mul, mean], Original ATen: [aten.mul, aten.mean] # Source node to ATen node mapping: # mean => mean # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_mean_mul_0 = async_compile.triton('triton_per_fused_mean_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, 
tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = -1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, mean], Original ATen: [aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
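The whole graph here is one persistent reduction: the input is fixed at 4x4x4x4, so the 256-element reduction size and the 256.0 divisor are baked into the kernel, which accumulates -1.0 * x in a single block and divides once at the end. The eager equivalent of what triton_per_fused_mean_mul_0 computes (an illustrative check, not part of the record):

import torch

x = torch.rand(4, 4, 4, 4)
# sum(-1.0 * x) / 256 is exactly torch.mean(-1.0 * x) for this fixed shape.
assert torch.allclose((x * -1.0).sum() / 256.0, torch.mean(-1.0 * x))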
import torch import torch.nn as nn def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returned without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_generator_loss(fgz, reduction='mean'): return reduce(-1.0 * fgz, reduction) class GeneratorLoss(nn.Module): """Base class for all generator losses. .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(GeneratorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_generator, device, batch_size, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value = discriminator(fake)` 3. :math:`loss = loss\\_function(value)` 4. Backpropagate by computing :math:`\\nabla loss` 5. Run a step of the optimizer for generator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``generator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` are present. batch_size (int): Batch size of the data inferred from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss.
""" if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_generator, device, batch_size, labels) else: if labels is None and generator.label_type == 'required': raise Exception('GAN model requires labels for training') noise = torch.randn(batch_size, generator.encoding_dims, device =device) optimizer_generator.zero_grad() if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) elif generator.label_type == 'generated': fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake) elif generator.label_type == 'generated': dgz = discriminator(fake, label_gen) else: dgz = discriminator(fake, labels) loss = self.forward(dgz) loss.backward() optimizer_generator.step() return loss.item() class WassersteinGeneratorLoss(GeneratorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(G) = -f(G(z)) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def forward(self, fgz): """Computes the loss for the given input. Args: dgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return wasserstein_generator_loss(fgz, self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = -1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returned without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_generator_loss(fgz, reduction='mean'): return reduce(-1.0 * fgz, reduction) class GeneratorLoss(nn.Module): """Base class for all generator losses. .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(GeneratorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_generator, device, batch_size, labels=None): """Defines the standard ``train_ops`` used by most losses.
Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value = discriminator(fake)` 3. :math:`loss = loss\\_function(value)` 4. Backpropagate by computing :math:`\\nabla loss` 5. Run a step of the optimizer for generator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``generator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` are present. batch_size (int): Batch size of the data inferred from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_generator, device, batch_size, labels) else: if labels is None and generator.label_type == 'required': raise Exception('GAN model requires labels for training') noise = torch.randn(batch_size, generator.encoding_dims, device =device) optimizer_generator.zero_grad() if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) elif generator.label_type == 'generated': fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake) elif generator.label_type == 'generated': dgz = discriminator(fake, label_gen) else: dgz = discriminator(fake, labels) loss = self.forward(dgz) loss.backward() optimizer_generator.step() return loss.item() class WassersteinGeneratorLossNew(GeneratorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(G) = -f(G(z)) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output is returned. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops``. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
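As with the other records, the rewritten loss can be sanity-checked against the eager class; a hypothetical harness (CUDA required, since call() asserts the exact contiguous 4x4x4x4 input and allocates on cuda:0):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
loss_eager = WassersteinGeneratorLoss()(x)
loss_triton = WassersteinGeneratorLossNew()(x)
print(torch.allclose(loss_eager, loss_triton))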
torchgan/torchgan
WassersteinGeneratorLoss
false
16,603
[ "MIT" ]
1,300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
Buck
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kz/ckzkv4zq547mt5lgrn3gqnn4fzqr6zudb44tp5ljyz5my7mmgbyd.py # Topologically Sorted Source Nodes: [neg, mul, exp, mul_1, pow_1, truediv, sub], Original ATen: [aten.neg, aten.mul, aten.exp, aten.pow, aten.div, aten.sub] # Source node to ATen node mapping: # exp => exp # mul => mul # mul_1 => mul_1 # neg => neg # pow_1 => pow_1 # sub => sub # truediv => div # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %primals_4), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %exp), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_4, 6), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %div), kwargs = {}) triton_poi_fused_div_exp_mul_neg_pow_sub_0 = async_compile.triton('triton_poi_fused_div_exp_mul_neg_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_neg_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp5 = tl.load(in_ptr2 + (x0), xmask) tmp9 = tl.load(in_ptr3 + (0)) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp4 = -tmp3 tmp6 = tmp4 * tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 * tmp7 tmp11 = tmp5 * tmp5 tmp12 = tmp11 * tmp5 tmp13 = tmp12 * tmp12 tmp14 = tmp10 / tmp13 tmp15 = tmp8 - tmp14 tl.store(out_ptr0 + (x0), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [neg, mul, exp, mul_1, pow_1, truediv, sub], Original ATen: [aten.neg, aten.mul, aten.exp, aten.pow, aten.div, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_mul_neg_pow_sub_0.run(primals_1, primals_2, primals_4, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_3 return (buf0, primals_1, primals_2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
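One detail worth noting in the fused kernel: the r ** 6 from the source is strength-reduced to three multiplies (tmp11 = r*r, tmp12 = tmp11*r giving r cubed, tmp13 = tmp12*tmp12 giving r to the sixth) instead of a libdevice pow call. The same decomposition in eager PyTorch (an illustrative check):

import torch

r = torch.rand(4, 4, 4, 4) + 0.5  # keep r away from zero
r3 = r * r * r                    # r cubed, as in the kernel's tmp12
assert torch.allclose(r3 * r3, r ** 6, rtol=1e-6)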
import torch import torch.nn class Buck(torch.nn.Module): def __init__(self, A=1.0, B=1.0, C=1.0): super(Buck, self).__init__() self.A = torch.nn.Parameter(torch.Tensor([A])) self.B = torch.nn.Parameter(torch.Tensor([B])) self.C = torch.nn.Parameter(torch.Tensor([C])) def Buckingham(self, r, A, B, C): return A * torch.exp(-B * r) - C / r ** 6 def forward(self, x): return self.Buckingham(x, self.A, self.B, self.C) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
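With the default parameters A = B = C = 1, the Buckingham potential at r = 1 is A * exp(-B) - C = exp(-1) - 1, about -0.632, which gives a handy fixed point for testing. A worked check (a sketch using the class above):

import math
import torch

buck = Buck()  # A = B = C = 1.0
r = torch.ones(1)
expected = math.exp(-1.0) - 1.0  # 1*exp(-1*1) - 1/1**6
assert torch.allclose(buck(r), torch.full((1,), expected))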
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp5 = tl.load(in_ptr2 + x0, xmask) tmp9 = tl.load(in_ptr3 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp4 = -tmp3 tmp6 = tmp4 * tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 * tmp7 tmp11 = tmp5 * tmp5 tmp12 = tmp11 * tmp5 tmp13 = tmp12 * tmp12 tmp14 = tmp10 / tmp13 tmp15 = tmp8 - tmp14 tl.store(out_ptr0 + x0, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_neg_pow_sub_0[grid(256)](primals_1, primals_2, primals_4, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf0, primals_1, primals_2, primals_4 class BuckNew(torch.nn.Module): def __init__(self, A=1.0, B=1.0, C=1.0): super(BuckNew, self).__init__() self.A = torch.nn.Parameter(torch.Tensor([A])) self.B = torch.nn.Parameter(torch.Tensor([B])) self.C = torch.nn.Parameter(torch.Tensor([C])) def Buckingham(self, r, A, B, C): return A * torch.exp(-B * r) - C / r ** 6 def forward(self, input_0): primals_1 = self.A primals_2 = self.B primals_3 = self.C primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
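The compiled version can then be compared against eager directly; since both construct A, B, and C with the same defaults, no state copying is needed. A hypothetical harness (CUDA required by call(), which asserts the contiguous 4x4x4x4 input shape):

import torch

eager = Buck().cuda()
compiled = BuckNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda') + 0.5  # keep r away from zero
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x)))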
torchmd/mdgrad
Buck
false
16,604
[ "MIT" ]
54
77bd7685b74b41acf54a9483546e1e8cb545eb01
https://github.com/torchmd/mdgrad/tree/77bd7685b74b41acf54a9483546e1e8cb545eb01
ParityPonderGRU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # h => full_default # Graph fragment: # %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/zw/czwxbu6nuxhoujusu3krfetmqzx7rxloioah6gicy4ie2wmv6tqi.py # Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack_3 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_8, %primals_8, %primals_8, %primals_8, %primals_6, %primals_6, %primals_6],), kwargs = {}) triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (x0), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 5, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr1 + (x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 6, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + (x0), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tmp0 >= tmp27 tmp32 = tl.full([1], 7, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tl.load(in_ptr1 + (x0), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, 
tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + (x2), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hq/chq2ihz43k5f3canufmvrzz3vy4seaa7ohwwzmv2o6szsqgwtn4r.py # Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack_4 => cat_1 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_9, %primals_9, %primals_9, %primals_9, %primals_7, %primals_7, %primals_7],), kwargs = {}) triton_poi_fused_stack_2 = async_compile.triton('triton_poi_fused_stack_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp23 = tl.load(in_ptr1 + (0)) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tmp0 >= tmp3 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tmp0 >= tmp8 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp0 >= tmp12 tmp16 = tl.full([1], 4, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tmp0 >= tmp16 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = tmp0 >= tmp20 tmp26 = tl.full([1], 6, tl.int64) tmp27 = tmp0 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp0 >= tmp26 tmp30 = tl.full([1], 7, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tl.where(tmp28, tmp24, tmp24) tmp33 = tl.where(tmp22, tmp24, tmp32) tmp34 = tl.where(tmp18, tmp6, tmp33) tmp35 = tl.where(tmp14, tmp6, tmp34) tmp36 = tl.where(tmp10, tmp6, tmp35) tmp37 = tl.where(tmp4, tmp6, tmp36) tl.store(out_ptr0 + (x0), tmp37, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/g2/cg23xreycdv6fxtkdfop75dzuhabmjs3xhrvq22ytdld2qihmoyg.py # Topologically Sorted 
Source Nodes: [stack_2], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4, %getitem_6, %getitem, %getitem_2, %getitem_4],), kwargs = {}) triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (x0 + (4*((-16) + x1))), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 24, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + (x0 + (4*((-20) + x1))), tmp29 & xmask, other=0.0) tmp31 = tmp0 >= tmp27 tmp32 = tl.full([1], 28, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tl.load(in_ptr2 + (x0 + (4*((-24) + x1))), tmp31 & xmask, other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + (x2), 
tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/oo/coo3437vo4xsrwedl7lcrwfswym3k6uq6mzbtutpnydbupg5rgob.py # Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones] # Source node to ATen node mapping: # bernoulli => lt_2 # bernoulli_1 => convert_element_type_1, lt_1 # bernoulli_2 => convert_element_type, lt # bernoulli_3 => convert_element_type_3, lt_3 # halt_1 => mul_10 # halt_2 => mul_13 # halt_3 => mul_23 # halted_1 => convert_element_type_2 # halted_2 => add_4 # halted_3 => add_6 # lambda_n_3 => full_default_6 # mul_10 => mul_11 # mul_11 => mul_12 # mul_12 => mul_19 # mul_13 => mul_17 # mul_17 => mul_14 # mul_18 => mul_15 # mul_19 => mul_20 # mul_20 => mul_16 # mul_24 => mul_24 # mul_25 => mul_25 # mul_26 => mul_26 # mul_27 => mul_27 # mul_3 => mul_7 # mul_4 => mul_8 # mul_6 => mul_18 # p_m => full_default_3 # p_m_1 => add_1 # p_m_2 => add_3 # p_m_3 => add_5 # p_m_4 => add_10 # p_n_1 => mul_2 # p_n_2 => mul_4 # p_n_3 => mul_5 # sigmoid => sigmoid_2 # sigmoid_1 => sigmoid_1 # sigmoid_2 => sigmoid # sub_10 => sub_10 # sub_13 => sub_13 # sub_14 => sub_14 # sub_2 => sub_4 # sub_4 => sub_2 # sub_6 => sub_7 # sub_8 => sub_1 # sub_9 => sub_9 # un_halted_prob_1 => sub_3 # un_halted_prob_2 => mul_3 # y_m_1 => add_7 # y_m_2 => add_8 # y_m_3 => add_9 # y_m_4 => add_11 # Graph fragment: # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_14,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select), kwargs = {}) # %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand, %select), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_13,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {}) # %lt_1 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_1, %select_1), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_1, torch.float32), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_12,), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_2), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %select_1), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_2), kwargs = {}) # %mul_4 : [num_users=2] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %select), kwargs = {}) # %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {}) # %lt_2 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_2, %select_2), kwargs = {}) # %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_2, torch.float32), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_2), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default_3, %sub_4), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %convert_element_type_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_8), kwargs = {}) # %mul_10 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %sub_4), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_10), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sub_7), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %mul_10), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %mul_12), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, %mul_10), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_4), kwargs = {}) # %mul_13 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %sub_9), kwargs = {}) # %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_13), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %sub_10), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %mul_13), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %mul_15), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_13), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %mul_13), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %mul_10), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %convert_element_type_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_18), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %sub_7), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %mul_17), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, %sub_10), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_20, %mul_16), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 1), kwargs = {dtype: 
torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %lt_3 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_3, %full_default_6), kwargs = {}) # %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_3, torch.float32), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_6), kwargs = {}) # %mul_23 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_3, %sub_13), kwargs = {}) # %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_23), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %sub_14), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %mul_23), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, %mul_25), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, %sub_14), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %mul_23), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {}) triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4 = async_compile.triton('triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*i1', 13: '*i1', 14: '*i1', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (4)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp5 = tl.load(in_ptr2 + (16 + x0), xmask) tmp8 = tl.load(in_ptr1 + (5)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (20 + x0), xmask) tmp13 = tl.load(in_ptr1 + (6)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp15 = tl.load(in_ptr2 + (24 + x0), xmask) tmp18 = tl.load(in_ptr3 + (x0), xmask) tmp20 = tl.load(in_ptr4 + (x0), xmask) tmp22 = tl.load(in_ptr5 + (x0), xmask) tmp59 = tl.load(in_ptr1 + (0)) tmp60 = tl.broadcast_to(tmp59, [XBLOCK]) tmp61 = tl.load(in_ptr2 + (x0), xmask) tmp66 = tl.load(in_ptr1 + (1)) tmp67 = tl.broadcast_to(tmp66, [XBLOCK]) tmp68 = tl.load(in_ptr2 + (4 + x0), xmask) tmp73 = tl.load(in_ptr1 + (2)) tmp74 = tl.broadcast_to(tmp73, [XBLOCK]) tmp75 = tl.load(in_ptr2 + (8 + x0), xmask) tmp80 = tl.load(in_ptr1 + (3)) tmp81 = tl.broadcast_to(tmp80, [XBLOCK]) tmp82 = tl.load(in_ptr2 + (12 + x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp18 < tmp17 tmp21 = tmp20 < tmp12 tmp23 = tmp22 < tmp7 tmp24 = tmp23.to(tl.float32) tmp25 = tmp1 - tmp24 tmp26 = 0.0 tmp27 = tmp26 * tmp25 tmp28 = tmp7 * tmp24 tmp29 = tmp27 + tmp28 tmp30 = tmp21.to(tl.float32) tmp31 = tmp30 * tmp25 tmp32 = tmp1 - tmp31 tmp33 = tmp29 * tmp32 tmp34 = tmp1 - tmp7 tmp35 = tmp34 * tmp12 tmp36 = tmp35 * tmp31 tmp37 = tmp33 + tmp36 tmp38 = tmp19.to(tl.float32) tmp39 = tmp24 + tmp31 tmp40 = tmp1 - tmp39 tmp41 = tmp38 * tmp40 tmp42 = tmp1 - tmp41 tmp43 = tmp37 * tmp42 tmp44 = tmp1 - tmp12 tmp45 = tmp34 * tmp44 tmp46 = tmp45 * tmp17 tmp47 = tmp46 * tmp41 tmp48 = tmp43 + tmp47 tmp49 = tmp2.to(tl.float32) tmp50 = tmp39 + tmp41 tmp51 = tmp1 - tmp50 tmp52 = tmp49 * tmp51 tmp53 = tmp1 - tmp52 tmp54 = tmp48 * tmp53 tmp55 = tmp1 - tmp17 tmp56 = tmp45 * tmp55 tmp57 = tmp56 * tmp52 tmp58 = tmp54 + tmp57 tmp62 = tmp60 + tmp61 tmp63 = tmp62 * tmp24 tmp64 = tmp27 + tmp63 tmp65 = tmp64 * tmp32 tmp69 = tmp67 + tmp68 tmp70 = tmp69 * tmp31 tmp71 = tmp65 + tmp70 tmp72 = tmp71 * tmp42 tmp76 = tmp74 + tmp75 tmp77 = tmp76 * tmp41 tmp78 = tmp72 + tmp77 tmp79 = tmp78 * tmp53 tmp83 = tmp81 + tmp82 tmp84 = tmp83 * tmp52 tmp85 = tmp79 + tmp84 tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp7, xmask) tl.store(out_ptr2 + (x0), tmp12, xmask) tl.store(out_ptr3 + (x0), tmp17, xmask) tl.store(out_ptr4 + (x0), tmp19, xmask) tl.store(out_ptr5 + (x0), tmp21, xmask) tl.store(out_ptr6 + (x0), tmp23, xmask) tl.store(in_out_ptr0 + (x0), tmp58, xmask) tl.store(in_out_ptr1 + (x0), tmp85, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jy/cjys3iopkrrusczes7dimlrpecwecndhklisdis5ynngdb4dpi3f.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %mul_2, %mul_4, %mul_5],), kwargs = {}) triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp12 * tmp13 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr0 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp11 - tmp21 tmp23 = tl.load(in_ptr1 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp11 - tmp23 tmp25 = tmp22 * tmp24 tmp26 = tl.load(in_ptr2 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tmp0 >= tmp18 tmp31 = tl.full([1], 16, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tl.load(in_ptr0 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp11 - tmp33 tmp35 = tl.load(in_ptr1 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp11 - tmp35 tmp37 = tmp34 * tmp36 tmp38 = tl.load(in_ptr2 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tmp11 - tmp38 tmp40 = tmp37 * tmp39 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp30, tmp40, tmp41) tmp43 = tl.where(tmp20, tmp29, tmp42) tmp44 = tl.where(tmp9, tmp16, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + (x0), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o4/co4faoo6vn5p4s7le2cjisym3ezxxx6wg265nankhpf5szhdz3rv.py # Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack_1 => cat_4 # Graph fragment: # %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = 
([%select_5, %select_4, %select_3, %select_6],), kwargs = {}) triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp15 = tl.load(in_ptr0 + (1)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp25 = tl.load(in_ptr0 + (2)) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + (3)) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp17 = tl.load(in_ptr1 + (4 + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp27 = tl.load(in_ptr1 + (8 + ((-8) + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tmp32 = tl.full([1], 16, tl.int64) tmp33 = tmp0 < tmp32 tmp36 = tl.load(in_ptr1 + (12 + ((-12) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tl.store(out_ptr0 + (x0), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (12, ), (1, )) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros] stream0 = get_raw_stream(0) triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten._thnn_fused_gru_cell] buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5) buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack] triton_poi_fused_stack_1.run(primals_8, primals_6, buf6, 28, grid=grid(28), stream=stream0) del primals_6 del primals_8 buf7 = empty_strided_cuda((7, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack] triton_poi_fused_stack_2.run(primals_9, primals_7, buf7, 7, grid=grid(7), stream=stream0) del primals_7 del primals_9 buf8 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.mm] extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8) # Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten._thnn_fused_gru_cell] buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5) buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten.mm] extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12) # Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten._thnn_fused_gru_cell] buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5) buf14 = buf13[0] buf15 = buf13[1] del buf13 buf16 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten.mm] extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16) # Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten._thnn_fused_gru_cell] buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5) del buf1 del buf16 del primals_4 del primals_5 buf18 = buf17[0] buf19 = buf17[1] del buf17 buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [stack_2], Original ATen: [aten.stack] triton_poi_fused_stack_3.run(buf4, buf10, buf14, buf18, buf20, 112, grid=grid(112), stream=stream0) buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [baddbmm], 
Original ATen: [aten.baddbmm] extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21) # Topologically Sorted Source Nodes: [bernoulli_2], Original ATen: [aten.bernoulli] buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf24 = buf23 del buf23 # Topologically Sorted Source Nodes: [bernoulli_1], Original ATen: [aten.bernoulli] buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf28 = buf27 del buf27 # Topologically Sorted Source Nodes: [bernoulli], Original ATen: [aten.bernoulli] buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf32 = buf31 del buf31 # Topologically Sorted Source Nodes: [bernoulli_3], Original ATen: [aten.bernoulli] buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4, ), (1, ), torch.bool) buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf25 = empty_strided_cuda((4, ), (1, ), torch.bool) buf29 = empty_strided_cuda((4, ), (1, ), torch.bool) buf33 = empty_strided_cuda((4, ), (1, ), torch.bool) buf34 = empty_strided_cuda((4, ), (1, ), torch.float32) buf39 = buf34; del buf34 # reuse buf35 = empty_strided_cuda((4, ), (1, ), torch.float32) buf40 = buf35; del buf35 # reuse # Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones] triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4.run(buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, grid=grid(4), stream=stream0) del buf24 del buf28 del buf32 del buf37 buf41 = reinterpret_tensor(buf18, (16, ), (1, ), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] triton_poi_fused_stack_5.run(buf30, buf26, buf22, buf41, 16, grid=grid(16), stream=stream0) buf42 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack] triton_poi_fused_stack_6.run(buf7, buf21, buf42, 16, grid=grid(16), stream=stream0) del buf21 del buf7 return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor(buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4), (16, 1, 4), 0), primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
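The four `torch.ops.aten.rand.default([4])` buffers in `call()` above are how Inductor lowers `torch.bernoulli(lambda_n)`: it materializes a uniform sample and compares it against the probability (the `%lt` and `convert_element_type` nodes in the graph fragment). A minimal sketch of that equivalence, with hypothetical variable names, run outside the compiled module:

import torch

# p plays the role of sigmoid(lambda_layer(h)) computed inside the fused kernel
p = torch.sigmoid(torch.randn(4))
u = torch.rand(4)            # corresponds to aten.rand.default([4]) in call()
halt = (u < p).float()       # corresponds to the `lt` + convert_element_type nodes
# torch.bernoulli(p) draws from the same elementwise Bernoulli(p) distribution:
reference = torch.bernoulli(p)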
from torch.nn import Module import torch from torch import nn from typing import Tuple import torch.utils.data import torch.nn.functional import torch.autograd class ParityPonderGRU(Module): """ ## PonderNet with GRU for Parity Task This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html) as the step function. This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`. Each element of the vector is either `0`, `1` or `-1` and the output is the parity - a binary value that is true if the number of `1`s is odd and false otherwise. The prediction of the model is the log probability of the parity being $1$. """ def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'): """ * `n_elems` is the number of elements in the input vector * `n_hidden` is the state vector size of the GRU * `max_steps` is the maximum number of steps $N$ """ super().__init__() self.max_steps = max_steps self.n_hidden = n_hidden self.gru = nn.GRUCell(n_elems, n_hidden) self.output_layer = nn.Linear(n_hidden, 1) self.lambda_layer = nn.Linear(n_hidden, 1) self.lambda_prob = nn.Sigmoid() self.is_halt = False def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ * `x` is the input of shape `[batch_size, n_elems]` This outputs a tuple of four tensors: 1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]` 2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$ 3. $p_m$ of shape `[batch_size]` 4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$ """ batch_size = x.shape[0] h = x.new_zeros((x.shape[0], self.n_hidden)) h = self.gru(x, h) p = [] y = [] un_halted_prob = h.new_ones((batch_size,)) halted = h.new_zeros((batch_size,)) p_m = h.new_zeros((batch_size,)) y_m = h.new_zeros((batch_size,)) for n in range(1, self.max_steps + 1): if n == self.max_steps: lambda_n = h.new_ones(h.shape[0]) else: lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0] y_n = self.output_layer(h)[:, 0] p_n = un_halted_prob * lambda_n un_halted_prob = un_halted_prob * (1 - lambda_n) halt = torch.bernoulli(lambda_n) * (1 - halted) p.append(p_n) y.append(y_n) p_m = p_m * (1 - halt) + p_n * halt y_m = y_m * (1 - halt) + y_n * halt halted = halted + halt h = self.gru(x, h) if self.is_halt and halted.sum() == batch_size: break return torch.stack(p), torch.stack(y), p_m, y_m def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
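The loop above keeps `un_halted_prob` equal to the probability of not having halted before step n, so `p_n = lambda_n * prod_{k<n}(1 - lambda_k)`; forcing `lambda_N = 1` on the final step makes the `p_n` a proper distribution over halting steps, and `p_m = p_m * (1 - halt) + p_n * halt` latches the prediction at the first sampled halt. A short self-contained check of that bookkeeping, with illustrative lambda values of our choosing:

import torch

lambdas = torch.tensor([0.3, 0.5, 0.2, 1.0])   # final lambda forced to 1, as in forward()
un_halted = torch.tensor(1.0)
p = []
for lam in lambdas:
    p.append(un_halted * lam)                  # p_n = un_halted_prob * lambda_n
    un_halted = un_halted * (1 - lam)          # un_halted_prob update
assert torch.isclose(torch.stack(p).sum(), torch.tensor(1.0))  # the p_n sum to 1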
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 5, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 6, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 7, tl.int64) tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 7 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp23 = tl.load(in_ptr1 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tmp0 >= tmp3 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tmp0 >= tmp8 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp0 >= tmp12 tmp16 = tl.full([1], 4, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tmp0 >= tmp16 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = tmp0 >= tmp20 tmp26 = tl.full([1], 6, tl.int64) tmp27 = tmp0 < tmp26 tmp28 = tmp25 & tmp27 tl.full([1], 7, 
tl.int64) tmp32 = tl.where(tmp28, tmp24, tmp24) tmp33 = tl.where(tmp22, tmp24, tmp32) tmp34 = tl.where(tmp18, tmp6, tmp33) tmp35 = tl.where(tmp14, tmp6, tmp34) tmp36 = tl.where(tmp10, tmp6, tmp35) tmp37 = tl.where(tmp4, tmp6, tmp36) tl.store(out_ptr0 + x0, tmp37, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 24, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 28, tl.int64) tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 4) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp5 = tl.load(in_ptr2 + (16 + x0), xmask) tmp8 = tl.load(in_ptr1 + 5) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (20 + x0), xmask) tmp13 = tl.load(in_ptr1 + 6) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp15 = tl.load(in_ptr2 + (24 + x0), xmask) tmp18 = tl.load(in_ptr3 + x0, xmask) tmp20 = tl.load(in_ptr4 + x0, xmask) tmp22 = tl.load(in_ptr5 + x0, xmask) tmp59 = tl.load(in_ptr1 + 0) tmp60 = tl.broadcast_to(tmp59, [XBLOCK]) tmp61 = tl.load(in_ptr2 + x0, xmask) tmp66 = tl.load(in_ptr1 + 1) tmp67 = tl.broadcast_to(tmp66, [XBLOCK]) tmp68 = tl.load(in_ptr2 + (4 + x0), xmask) tmp73 = tl.load(in_ptr1 + 2) tmp74 = tl.broadcast_to(tmp73, [XBLOCK]) tmp75 = tl.load(in_ptr2 + (8 + x0), xmask) tmp80 = tl.load(in_ptr1 + 3) tmp81 = tl.broadcast_to(tmp80, [XBLOCK]) tmp82 = tl.load(in_ptr2 + (12 + x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp18 < tmp17 tmp21 = tmp20 < tmp12 tmp23 = tmp22 < tmp7 tmp24 = tmp23.to(tl.float32) tmp25 = tmp1 - 
tmp24 tmp26 = 0.0 tmp27 = tmp26 * tmp25 tmp28 = tmp7 * tmp24 tmp29 = tmp27 + tmp28 tmp30 = tmp21.to(tl.float32) tmp31 = tmp30 * tmp25 tmp32 = tmp1 - tmp31 tmp33 = tmp29 * tmp32 tmp34 = tmp1 - tmp7 tmp35 = tmp34 * tmp12 tmp36 = tmp35 * tmp31 tmp37 = tmp33 + tmp36 tmp38 = tmp19.to(tl.float32) tmp39 = tmp24 + tmp31 tmp40 = tmp1 - tmp39 tmp41 = tmp38 * tmp40 tmp42 = tmp1 - tmp41 tmp43 = tmp37 * tmp42 tmp44 = tmp1 - tmp12 tmp45 = tmp34 * tmp44 tmp46 = tmp45 * tmp17 tmp47 = tmp46 * tmp41 tmp48 = tmp43 + tmp47 tmp49 = tmp2.to(tl.float32) tmp50 = tmp39 + tmp41 tmp51 = tmp1 - tmp50 tmp52 = tmp49 * tmp51 tmp53 = tmp1 - tmp52 tmp54 = tmp48 * tmp53 tmp55 = tmp1 - tmp17 tmp56 = tmp45 * tmp55 tmp57 = tmp56 * tmp52 tmp58 = tmp54 + tmp57 tmp62 = tmp60 + tmp61 tmp63 = tmp62 * tmp24 tmp64 = tmp27 + tmp63 tmp65 = tmp64 * tmp32 tmp69 = tmp67 + tmp68 tmp70 = tmp69 * tmp31 tmp71 = tmp65 + tmp70 tmp72 = tmp71 * tmp42 tmp76 = tmp74 + tmp75 tmp77 = tmp76 * tmp41 tmp78 = tmp72 + tmp77 tmp79 = tmp78 * tmp53 tmp83 = tmp81 + tmp82 tmp84 = tmp83 * tmp52 tmp85 = tmp79 + tmp84 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr2 + x0, tmp12, xmask) tl.store(out_ptr3 + x0, tmp17, xmask) tl.store(out_ptr4 + x0, tmp19, xmask) tl.store(out_ptr5 + x0, tmp21, xmask) tl.store(out_ptr6 + x0, tmp23, xmask) tl.store(in_out_ptr0 + x0, tmp58, xmask) tl.store(in_out_ptr1 + x0, tmp85, xmask) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 * tmp13 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp11 - tmp21 tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp11 - tmp23 tmp25 = tmp22 * tmp24 tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tmp0 >= tmp18 tl.full([1], 16, tl.int64) tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp34 = tmp11 - tmp33 tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tmp11 - tmp35 tmp37 = tmp34 * tmp36 tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tmp11 - tmp38 tmp40 = tmp37 * tmp39 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp30, tmp40, tmp41) tmp43 = tl.where(tmp20, tmp29, tmp42) tmp44 = tl.where(tmp9, tmp16, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + x0, tmp45, xmask) @triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl 
.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp15 = tl.load(in_ptr0 + 1) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp25 = tl.load(in_ptr0 + 2) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + 3) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tl.full([1], 16, tl.int64) tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tl.store(out_ptr0 + x0, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5) buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32) triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28, XBLOCK=32, num_warps=1, num_stages=1) del primals_6 del primals_8 buf7 = empty_strided_cuda((7,), (1,), torch.float32) triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7, XBLOCK=8, num_warps=1, num_stages=1) del primals_7 del primals_9 buf8 = buf2 del buf2 extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8) buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5) buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = buf8 del buf8 
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12) buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5) buf14 = buf13[0] buf15 = buf13[1] del buf13 buf16 = buf12 del buf12 extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16) buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5) del buf1 del buf16 del primals_4 del primals_5 buf18 = buf17[0] buf19 = buf17[1] del buf17 buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32) triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18, buf20, 112, XBLOCK=128, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21) buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf24 = buf23 del buf23 buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf28 = buf27 del buf27 buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf32 = buf31 del buf31 buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4,), (1,), torch.bool) buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf25 = empty_strided_cuda((4,), (1,), torch.bool) buf29 = empty_strided_cuda((4,), (1,), torch.bool) buf33 = empty_strided_cuda((4,), (1,), torch.bool) buf34 = empty_strided_cuda((4,), (1,), torch.float32) buf39 = buf34 del buf34 buf35 = empty_strided_cuda((4,), (1,), torch.float32) buf40 = buf35 del buf35 triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[ grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf24 del buf28 del buf32 del buf37 buf41 = reinterpret_tensor(buf18, (16,), (1,), 0) del buf18 triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf21 del buf7 return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor (buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4 ), (16, 1, 4), 0), primals_3) class ParityPonderGRUNew(Module): """ ## PonderNet with GRU for Parity Task This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html) as the step function. This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`. Each element of the vector is either `0`, `1` or `-1` and the output is the parity - a binary value that is true if the number of `1`s is odd and false otherwise. The prediction of the model is the log probability of the parity being $1$. 
""" def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'): """ * `n_elems` is the number of elements in the input vector * `n_hidden` is the state vector size of the GRU * `max_steps` is the maximum number of steps $N$ """ super().__init__() self.max_steps = max_steps self.n_hidden = n_hidden self.gru = nn.GRUCell(n_elems, n_hidden) self.output_layer = nn.Linear(n_hidden, 1) self.lambda_layer = nn.Linear(n_hidden, 1) self.lambda_prob = nn.Sigmoid() self.is_halt = False def forward(self, input_0): primals_2 = self.gru.weight_ih primals_3 = self.gru.weight_hh primals_4 = self.gru.bias_ih primals_5 = self.gru.bias_hh primals_6 = self.output_layer.weight primals_7 = self.output_layer.bias primals_8 = self.lambda_layer.weight primals_9 = self.lambda_layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2], output[3]
techthiyanes/annotated_deep_learning_paper_implementations
ParityPonderGRU
false
16605
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
WassersteinDiscriminatorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/b3/cb3pc2vp6wrlkpzdz67xefuwj5y5w37f2osmgvolg5bcw2n4mtuq.py # Topologically Sorted Source Nodes: [sub, mean], Original ATen: [aten.sub, aten.mean] # Source node to ATen node mapping: # mean => mean # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {}) triton_per_fused_mean_sub_0 = async_compile.triton('triton_per_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) 
xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, mean], Original ATen: [aten.sub, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_discriminator_loss(fx, fgz, reduction='mean'): return reduce(fgz - fx, reduction) class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. 
""" if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class WassersteinDiscriminatorLoss(DiscriminatorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(D) = f(G(z)) - f(x) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. clip (tuple, optional): Tuple that specifies the maximum and minimum parameter clamping to be applied, as per the original version of the Wasserstein loss without Gradient Penalty. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, reduction='mean', clip=None, override_train_ops=None): super(WassersteinDiscriminatorLoss, self).__init__(reduction, override_train_ops) if (isinstance(clip, tuple) or isinstance(clip, list)) and len(clip ) > 1: self.clip = clip else: self.clip = None def forward(self, fx, fgz): """Computes the loss for the given input. Args: fx (torch.Tensor) : Output of the Discriminator with real data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. fgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return wasserstein_discriminator_loss(fx, fgz, self.reduction) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by wasserstein discriminator loss. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. Clamp the discriminator parameters to satisfy :math:`lipschitz\\ condition` 2. :math:`fake = generator(noise)` 3. :math:`value_1 = discriminator(fake)` 4. :math:`value_2 = discriminator(real)` 5. :math:`loss = loss\\_function(value_1, value_2)` 6. 
Backpropagate by computing :math:`\\nabla loss` 7. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if self.clip is not None: for p in discriminator.parameters(): p.data.clamp_(self.clip[0], self.clip[1]) return super(WassersteinDiscriminatorLoss, self).train_ops( generator, discriminator, optimizer_discriminator, real_inputs, device, labels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_discriminator_loss(fx, fgz, reduction='mean'): return reduce(fgz - fx, reduction) class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. 
Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class WassersteinDiscriminatorLossNew(DiscriminatorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(D) = f(G(z)) - f(x) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. clip (tuple, optional): Tuple that specifies the maximum and minimum parameter clamping to be applied, as per the original version of the Wasserstein loss without Gradient Penalty. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. 
""" def __init__(self, reduction='mean', clip=None, override_train_ops=None): super(WassersteinDiscriminatorLossNew, self).__init__(reduction, override_train_ops) if (isinstance(clip, tuple) or isinstance(clip, list)) and len(clip ) > 1: self.clip = clip else: self.clip = None def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by wasserstein discriminator loss. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. Clamp the discriminator parameters to satisfy :math:`lipschitz\\ condition` 2. :math:`fake = generator(noise)` 3. :math:`value_1 = discriminator(fake)` 4. :math:`value_2 = discriminator(real)` 5. :math:`loss = loss\\_function(value_1, value_2)` 6. Backpropagate by computing :math:`\\nabla loss` 7. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if self.clip is not None: for p in discriminator.parameters(): p.data.clamp_(self.clip[0], self.clip[1]) return super(WassersteinDiscriminatorLossNew, self).train_ops( generator, discriminator, optimizer_discriminator, real_inputs, device, labels) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
torchgan/torchgan
WassersteinDiscriminatorLoss
false
16606
[ "MIT" ]
1300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
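A minimal sanity check, not part of the dataset record above: assuming a CUDA device and that the record's WassersteinDiscriminatorLoss and WassersteinDiscriminatorLossNew classes are in scope, the fused triton_per_fused_mean_sub_0 kernel reduces all 256 elements of fgz - fx in a single program (grid(1), RBLOCK=256), so the Triton-backed forward should agree with the eager loss.

import torch

# Hypothetical equivalence check between the eager and compiled losses.
eager_loss = WassersteinDiscriminatorLoss()        # computes mean(fgz - fx)
compiled_loss = WassersteinDiscriminatorLossNew()  # Triton-backed forward
fx = torch.rand(4, 4, 4, 4, device='cuda')         # critic output on real data
fgz = torch.rand(4, 4, 4, 4, device='cuda')        # critic output on fake data
torch.testing.assert_close(eager_loss(fx, fgz), compiled_loss(fx, fgz))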
NormalizedLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] # Source node to ATen node mapping: # inputs => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xq/cxqusq75hwmr62uwu6yk5o74t3l5fybelvjedksxnzws64lv4efg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # output_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %mm), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_0.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: 
[aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf2, buf3, 16, grid=grid(16), stream=stream0) return (buf3, primals_2, primals_3, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn class NormalizedLinear(torch.nn.Module): """ A advanced Linear layer which supports weight normalization or cosine normalization. """ def __init__(self, in_features, out_features, bias=False, feat_norm= True, scale_mode='learn', scale_init=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.feat_norm = feat_norm self.scale_mode = scale_mode self.scale_init = scale_init self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if self.scale_mode == 'constant': self.scale = scale_init elif self.scale_mode == 'learn': self.scale = torch.nn.Parameter(torch.ones(1) * scale_init) else: raise NotImplementedError def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight ) bound = 1 / math.sqrt(fan_in) torch.nn.init.uniform_(self.bias, -bound, bound) def forward(self, inputs): """ Args: inputs (torch.Tensor): (N, C) Return: output (torch.Tensor): (N, D) """ if self.feat_norm: inputs = torch.nn.functional.normalize(inputs, dim=1) output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1 ).t()) output = self.scale * output return output def extra_repr(self): s = 'in_features={in_features}, out_features={out_features}' if self.bias is None: s += ', bias=False' s += ', feat_norm={feat_norm}' s += ', scale_mode={scale_mode}' s += ', scale_init={scale_init}' return s.format(**self.__dict__) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = buf1 del buf1 triton_poi_fused_mul_1[grid(16)](primals_3, buf2, buf3, 16, XBLOCK= 16, num_warps=1, num_stages=1) return buf3, primals_2, primals_3, buf0, buf2 class NormalizedLinearNew(torch.nn.Module): """ A advanced Linear layer which supports weight normalization or cosine normalization. 
""" def __init__(self, in_features, out_features, bias=False, feat_norm= True, scale_mode='learn', scale_init=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.feat_norm = feat_norm self.scale_mode = scale_mode self.scale_init = scale_init self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if self.scale_mode == 'constant': self.scale = scale_init elif self.scale_mode == 'learn': self.scale = torch.nn.Parameter(torch.ones(1) * scale_init) else: raise NotImplementedError def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight ) bound = 1 / math.sqrt(fan_in) torch.nn.init.uniform_(self.bias, -bound, bound) def extra_repr(self): s = 'in_features={in_features}, out_features={out_features}' if self.bias is None: s += ', bias=False' s += ', feat_norm={feat_norm}' s += ', scale_mode={scale_mode}' s += ', scale_init={scale_init}' return s.format(**self.__dict__) def forward(self, input_0): primals_1 = self.weight primals_3 = self.scale primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
tonysy/cvpods
NormalizedLinear
false
16607
[ "Apache-2.0" ]
548
e322d7842ca0e34b1ef6237ea6d350633efc793a
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
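A minimal usage sketch, assuming a CUDA device, with NormalizedLinearNew taken from the record above. With feat_norm=True the layer computes scale * normalize(x) @ normalize(W).T, so each output entry is a cosine similarity scaled by the learned scale. Note that neither variant calls reset_parameters() in __init__, so the weight tensor starts as uninitialized memory and should be initialized explicitly before use.

import torch

layer = NormalizedLinearNew(in_features=4, out_features=4).cuda()
layer.reset_parameters()              # weight is otherwise uninitialized memory
x = torch.rand(4, 4, device='cuda')
out = layer(x)                        # scaled cosine similarities
assert out.shape == (4, 4)
# With scale initialised to the default 1.0, every entry is a cosine
# value, so magnitudes stay within 1 up to floating-point slack.
assert out.abs().max() <= 1.0 + 1e-6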
Value
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [state_values], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Value(nn.Module): def __init__(self, num_inputs): super(Value, self).__init__() self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.value_head = nn.Linear(64, 1) self.value_head.weight.data.mul_(0.1) self.value_head.bias.data.mul_(0.0) def forward(self, x): x = F.tanh(self.affine1(x)) x = F.tanh(self.affine2(x)) state_values = self.value_head(x) return state_values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64), (64, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class ValueNew(nn.Module): def __init__(self, num_inputs): super(ValueNew, self).__init__() self.affine1 = nn.Linear(num_inputs, 64) self.affine2 = nn.Linear(64, 64) self.value_head = nn.Linear(64, 1) self.value_head.weight.data.mul_(0.1) self.value_head.bias.data.mul_(0.0) def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_6 = self.value_head.weight primals_7 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
tpbarron/pytorch-ppo
Value
false
16608
[ "MIT" ]
47
f73226865e34443f93dbec58939329c9278828e8
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
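A minimal shape sketch, assuming a CUDA device, with ValueNew taken from the record above. nn.Linear acts on the last dimension, which is why the compiled call() reinterprets the (4, 4, 4, 4) input as a (64, 4) matrix for the two matmul-plus-fused-tanh stages and reshapes the final addmm result back to (4, 4, 4, 1).

import torch

net = ValueNew(num_inputs=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
values = net(x)
assert values.shape == (4, 4, 4, 1)   # one scalar state value per leading index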
MinimaxDiscriminatorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ie/cielolrevovxc3jw6so752kawirsv5asxwujo5d3qeptlgin4mmh.py # Topologically Sorted Source Nodes: [loss, binary_cross_entropy_with_logits_1, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.add] # Source node to ATen node mapping: # binary_cross_entropy_with_logits_1 => abs_2, exp_1, full_default_3, log1p_1, mean_1, minimum_1, neg_1, sub_4, sub_5 # loss => abs_1, exp, full_default, full_default_1, log1p, mean, minimum, mul_1, neg, sub_1, sub_2 # loss_1 => add # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %arg0_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_3, %arg1_1), kwargs = {}) # 
%abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum_1, %log1p_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %sub_4), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp13 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.0 tmp2 = tmp1 * tmp0 tmp3 = triton_helpers.minimum(tmp1, tmp0) tmp4 = tl_math.abs(tmp0) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = libdevice.log1p(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp2 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp14 = triton_helpers.minimum(tmp1, tmp13) tmp15 = tl_math.abs(tmp13) tmp16 = -tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = libdevice.log1p(tmp17) tmp19 = tmp14 - tmp18 tmp20 = tmp13 - tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = 
triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp12 / tmp24 tmp26 = tmp23 / tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp27, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, binary_cross_entropy_with_logits_1, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'): target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing) target_zeros = torch.zeros_like(dx) loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction= reduction) loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction =reduction) return loss class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. 
""" if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class MinimaxDiscriminatorLoss(DiscriminatorLoss): """Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_ The loss can be described as: .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))] where - :math:`G` : Generator - :math:`D` : Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: label_smoothing (float, optional): The factor by which the labels (1 in this case) needs to be smoothened. For example, label_smoothing = 0.2 changes the value of the real labels to 0.8. reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None): super(MinimaxDiscriminatorLoss, self).__init__(reduction, override_train_ops) self.label_smoothing = label_smoothing def forward(self, dx, dgz): """Computes the loss for the given input. Args: dx (torch.Tensor) : Output of the Discriminator with real data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. dgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return minimax_discriminator_loss(dx, dgz, label_smoothing=self. label_smoothing, reduction=self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp13 = tl.load(in_ptr1 + r0, None) tmp1 = 0.0 tmp2 = tmp1 * tmp0 tmp3 = triton_helpers.minimum(tmp1, tmp0) tmp4 = tl_math.abs(tmp0) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = libdevice.log1p(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp2 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp14 = triton_helpers.minimum(tmp1, tmp13) tmp15 = tl_math.abs(tmp13) tmp16 = -tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = libdevice.log1p(tmp17) tmp19 = tmp14 - tmp18 tmp20 = tmp13 - tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp12 / tmp24 tmp26 = tmp23 / tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'): target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing) target_zeros = torch.zeros_like(dx) loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction= reduction) loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction =reduction) return loss class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. 
note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class MinimaxDiscriminatorLossNew(DiscriminatorLoss): """Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_ The loss can be described as: .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))] where - :math:`G` : Generator - :math:`D` : Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: label_smoothing (float, optional): The factor by which the labels (1 in this case) needs to be smoothened. 
For example, label_smoothing = 0.2 changes the value of the real labels to 0.8. reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output is taken. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops``. """

    def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None):
        super(MinimaxDiscriminatorLossNew, self).__init__(reduction, override_train_ops)
        self.label_smoothing = label_smoothing

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
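As a sanity check, the compiled wrapper can be compared against the eager loss. This is a minimal sketch, assuming a CUDA device is available; note the fused kernel hard-codes targets of 1.0/0.0 and a mean over all 256 elements, so it matches the eager path only for label_smoothing=0 and reduction='mean':

dx = torch.rand(4, 4, 4, 4, device='cuda')
dgz = torch.rand(4, 4, 4, 4, device='cuda')
compiled = MinimaxDiscriminatorLossNew()
# both paths compute BCE(dx, 1) + BCE(dgz, 0), each averaged over 256 elements
assert torch.allclose(compiled(dx, dgz), minimax_discriminator_loss(dx, dgz), atol=1e-5)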
torchgan/torchgan
MinimaxDiscriminatorLoss
false
16,609
[ "MIT" ]
1,300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
VirtualBatchNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/23/c23jg33pa2prp4iwyevdetkedocjv56blfdlx3hu2waow3n2qbb3.py # Topologically Sorted Source Nodes: [mu, var], Original ATen: [aten.mean, aten.var] # Source node to ATen node mapping: # mu => mean # var => var # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0], True), kwargs = {}) # %var : [num_users=2] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 1, keepdim: True}) triton_poi_fused_mean_var_0 = async_compile.triton('triton_poi_fused_mean_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), 
xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w2/cw2sqrlevlzzmj3khrkrchps3gaeoh3ly45dxijk5luzkvnebeie.py # Topologically Sorted Source Nodes: [add, std, sub, x, mul, out], Original ATen: [aten.add, aten.sqrt, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # out => add_1 # std => sqrt # sub => sub # x => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_1), kwargs = {}) triton_poi_fused_add_div_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 / tmp6 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu, var], Original ATen: [aten.mean, aten.var] stream0 = get_raw_stream(0) triton_poi_fused_mean_var_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, std, sub, x, mul, out], Original ATen: [aten.add, aten.sqrt, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_mul_sqrt_sub_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf2, buf1, buf0, primals_1, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class VirtualBatchNorm(nn.Module):
    """Virtual Batch Normalization Module as proposed in the paper
    `"Improved Techniques for Training GANs" by Salimans et al.
    <https://arxiv.org/abs/1606.03498>`_

    Normalizes the features of a batch based on the statistics collected on a
    reference batch of samples that is chosen once and fixed from the start,
    as opposed to regular batch normalization that uses the statistics of the
    batch being normalized.

    Virtual Batch Normalization requires that the size of the batch being
    normalized is at least a multiple of (and ideally equal to) the size of
    the reference batch. Keep this in mind while choosing the batch size in
    ``torch.utils.data.DataLoader`` or use ``drop_last=True``.

    .. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta

    where

    - :math:`x` : Batch Being Normalized
    - :math:`x_{ref}` : Reference Batch

    Args:
        in_features (int): Size of the input dimension to be normalized
        eps (float, optional): Value to be added to variance for numerical
            stability while normalizing
    """

    def __init__(self, in_features, eps=1e-05):
        super(VirtualBatchNorm, self).__init__()
        self.in_features = in_features
        self.scale = nn.Parameter(torch.ones(in_features))
        self.bias = nn.Parameter(torch.zeros(in_features))
        self.ref_mu = None
        self.ref_var = None
        self.eps = eps

    def _batch_stats(self, x):
        """Computes the statistics of the batch ``x``.

        Args:
            x (torch.Tensor): Tensor whose statistics need to be computed.

        Returns:
            A tuple of the mean and variance of the batch ``x``.
        """
        mu = torch.mean(x, dim=0, keepdim=True)
        var = torch.var(x, dim=0, keepdim=True)
        return mu, var

    def _normalize(self, x, mu, var):
        """Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.

        Args:
            x (torch.Tensor): The Tensor to be normalized.
            mu (torch.Tensor): Mean using which the Tensor is to be normalized.
            var (torch.Tensor): Variance used in the normalization of ``x``.

        Returns:
            Normalized Tensor ``x``.
        """
        std = torch.sqrt(self.eps + var)
        x = (x - mu) / std
        # Collapse every dimension except the feature dimension (dim 1) so the
        # affine parameters broadcast over the batch and spatial dimensions.
        sizes = list(x.size())
        for dim, _ in enumerate(x.size()):
            if dim != 1:
                sizes[dim] = 1
        scale = self.scale.view(*sizes)
        bias = self.bias.view(*sizes)
        return x * scale + bias

    def forward(self, x, clear=True):
        """Computes the output of the Virtual Batch Normalization.

        The first call caches the statistics of ``x`` as the reference
        statistics; subsequent calls normalize against them and, if ``clear``
        is ``True``, reset the cache afterwards.

        Args:
            x (torch.Tensor): A Torch Tensor of dimension at least 2 which is
                to be Normalized

        Returns:
            Torch Tensor of the same dimension after normalizing with respect
            to the statistics of the reference batch
        """
        assert x.size(1) == self.in_features
        if self.ref_mu is None or self.ref_var is None:
            self.ref_mu, self.ref_var = self._batch_stats(x)
            self.ref_mu = self.ref_mu.clone().detach()
            self.ref_var = self.ref_var.clone().detach()
            out = self._normalize(x, self.ref_mu, self.ref_var)
        else:
            out = self._normalize(x, self.ref_mu, self.ref_var)
            if clear:
                self.ref_mu = None
                self.ref_var = None
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
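The reference-batch protocol is easiest to see in a short sketch (the batch shapes are illustrative assumptions):

import torch

vbn = VirtualBatchNorm(in_features=4)
_ = vbn(torch.rand(4, 4, 4, 4))    # first call caches ref_mu / ref_var from this batch
out = vbn(torch.rand(4, 4, 4, 4))  # normalized against the cached reference stats;
                                   # clear=True (the default) then resets the cache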
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp21, xmask) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 / tmp6 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_var_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sqrt_sub_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf2, buf1, buf0, primals_1, buf0, buf1 class VirtualBatchNormNew(nn.Module): """Virtual Batch Normalization Module as proposed in the paper `"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_ Performs Normalizes the features of a batch based on the statistics collected on a reference batch of samples that are chosen once and fixed from the start, as opposed to regular batch normalization that uses the statistics of the batch being normalized Virtual Batch Normalization requires that the size of the batch being normalized is at least a multiple of (and ideally equal to) the size of the reference batch. 
Keep this in mind while choosing the batch size in ```torch.utils.data.DataLoader``` or use ```drop_last=True``` .. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta where - :math:`x` : Batch Being Normalized - :math:`x_{ref}` : Reference Batch Args: in_features (int): Size of the input dimension to be normalized eps (float, optional): Value to be added to variance for numerical stability while normalizing """ def __init__(self, in_features, eps=1e-05): super(VirtualBatchNormNew, self).__init__() self.in_features = in_features self.scale = nn.Parameter(torch.ones(in_features)) self.bias = nn.Parameter(torch.zeros(in_features)) self.ref_mu = None self.ref_var = None self.eps = eps def _batch_stats(self, x): """Computes the statistics of the batch ``x``. Args: x (torch.Tensor): Tensor whose statistics need to be computed. Returns: A tuple of the mean and variance of the batch ``x``. """ mu = torch.mean(x, dim=0, keepdim=True) var = torch.var(x, dim=0, keepdim=True) return mu, var def _normalize(self, x, mu, var): """Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``. Args: x (torch.Tensor): The Tensor to be normalized. mu (torch.Tensor): Mean using which the Tensor is to be normalized. var (torch.Tensor): Variance used in the normalization of ``x``. Returns: Normalized Tensor ``x``. """ std = torch.sqrt(self.eps + var) x = (x - mu) / std sizes = list(x.size()) for dim, i in enumerate(x.size()): if dim != 1: sizes[dim] = 1 scale = self.scale.view(*sizes) bias = self.bias.view(*sizes) return x * scale + bias def forward(self, input_0): primals_2 = self.scale primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
torchgan/torchgan
VirtualBatchNorm
false
16,610
[ "MIT" ]
1,300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
CosineFastRCNNOutputLayers
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] # Source node to ATen node mapping: # inputs => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fk/cfk3au6bpii4t2zk7fuswzhjd4lviqkrkh2zykiukyxmuklrwoxj.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pt/cptezbtn24n3kxwvir5vn3gemn7jrmd4py73p6z2543wpzbl6lwt.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # output_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %mm), kwargs = {}) triton_poi_fused_mul_2 = 
async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 20, grid=grid(20), stream=stream0) buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_3, buf2, buf3, 20, grid=grid(20), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proposal_deltas], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_4 del primals_5 return (buf3, buf4, primals_1, primals_2, primals_3, buf0, buf2, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn


class NormalizedLinear(torch.nn.Module):
    """
    An advanced linear layer which supports weight normalization or cosine
    normalization.
    """

    def __init__(self, in_features, out_features, bias=False, feat_norm=True,
                 scale_mode='learn', scale_init=1.0):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.feat_norm = feat_norm
        self.scale_mode = scale_mode
        self.scale_init = scale_init
        self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        if self.scale_mode == 'constant':
            self.scale = scale_init
        elif self.scale_mode == 'learn':
            self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
        else:
            raise NotImplementedError

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, inputs):
        """
        Args:
            inputs (torch.Tensor): (N, C)

        Returns:
            output (torch.Tensor): (N, D)
        """
        if self.feat_norm:
            inputs = torch.nn.functional.normalize(inputs, dim=1)
        output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1).t())
        output = self.scale * output
        return output

    def extra_repr(self):
        s = 'in_features={in_features}, out_features={out_features}'
        if self.bias is None:
            s += ', bias=False'
        s += ', feat_norm={feat_norm}'
        s += ', scale_mode={scale_mode}'
        s += ', scale_init={scale_init}'
        return s.format(**self.__dict__)


class CosineFastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
      (1) proposal-to-detection box regression deltas
      (2) classification scores
    """

    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
                 box_dim=4, scale_mode='learn', scale_init=20.0):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox
                regression
            box_dim (int): the dimension of bounding boxes. Example box
                dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA
                boxes
        """
        super(CosineFastRCNNOutputLayers, self).__init__()
        if not isinstance(input_size, int):
            input_size = np.prod(input_size)
        self.cls_score = NormalizedLinear(input_size, num_classes + 1,
            scale_mode=scale_mode, scale_init=scale_init)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for layer in [self.cls_score, self.bbox_pred]:
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        return scores, proposal_deltas


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4}]
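A small shape check, assuming the init values used by get_init_inputs above (the batch size of 8 is an arbitrary assumption):

import torch

head = CosineFastRCNNOutputLayers(input_size=4, num_classes=4, cls_agnostic_bbox_reg=True)
scores, deltas = head(torch.rand(8, 4))
# scores: (8, 5) -- scaled cosine similarities for num_classes + 1 (background) classes
# deltas: (8, 4) -- class-agnostic regression deltas (1 * box_dim)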
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(20)](primals_2, buf1, 20, XBLOCK=32, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0) del buf1 triton_poi_fused_mul_2[grid(20)](primals_3, buf2, buf3, 20, XBLOCK= 32, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_4 del primals_5 return buf3, buf4, primals_1, primals_2, primals_3, buf0, buf2 class NormalizedLinear(torch.nn.Module): """ A advanced Linear layer which supports weight normalization or cosine normalization. """ def __init__(self, in_features, out_features, bias=False, feat_norm= True, scale_mode='learn', scale_init=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.feat_norm = feat_norm self.scale_mode = scale_mode self.scale_init = scale_init self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if self.scale_mode == 'constant': self.scale = scale_init elif self.scale_mode == 'learn': self.scale = torch.nn.Parameter(torch.ones(1) * scale_init) else: raise NotImplementedError def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight ) bound = 1 / math.sqrt(fan_in) torch.nn.init.uniform_(self.bias, -bound, bound) def forward(self, inputs): """ Args: inputs (torch.Tensor): (N, C) Return: output (torch.Tensor): (N, D) """ if self.feat_norm: inputs = torch.nn.functional.normalize(inputs, dim=1) output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1 ).t()) output = self.scale * output return output def extra_repr(self): s = 'in_features={in_features}, out_features={out_features}' if self.bias is None: s += ', bias=False' s += ', feat_norm={feat_norm}' s += ', scale_mode={scale_mode}' s += ', scale_init={scale_init}' return s.format(**self.__dict__) class CosineFastRCNNOutputLayersNew(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, scale_mode='learn', scale_init=20.0): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground 
classes cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(CosineFastRCNNOutputLayersNew, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = NormalizedLinear(input_size, num_classes + 1, scale_mode=scale_mode, scale_init=scale_init) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.bbox_pred]: if layer.bias is not None: nn.init.constant_(layer.bias, 0)

    def forward(self, input_0):
        # primals_1 is the flattened input batch: `call` L2-normalizes it for
        # the cosine scores and feeds it to the bbox addmm. primals_4 is the
        # bbox weight, which `call` transposes via reinterpret_tensor.
        primals_1 = input_0
        primals_2 = self.cls_score.weight
        primals_3 = self.cls_score.scale
        primals_4 = self.bbox_pred.weight
        primals_5 = self.bbox_pred.bias
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
tonysy/cvpods
CosineFastRCNNOutputLayers
false
16,611
[ "Apache-2.0" ]
548
e322d7842ca0e34b1ef6237ea6d350633efc793a
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
MinibatchDiscrimination1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qj/cqjzt26fu5t4hqresf5x3iq3urrpyjfmcfaj3pqbi57msamyo7jz.py # Topologically Sorted Source Nodes: [sub, abs_1, sum_1, neg, exp], Original ATen: [aten.sub, aten.abs, aten.sum, aten.neg, aten.exp] # Source node to ATen node mapping: # abs_1 => abs_1 # exp => exp # neg => neg # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %permute), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [3]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) triton_per_fused_abs_exp_neg_sub_sum_0 = async_compile.triton('triton_per_fused_abs_exp_neg_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_exp_neg_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_exp_neg_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x5 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (r3 + (16*x5)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qh/cqhzpumg4wbpb7oef2bjhb6pmbbehhgep2m33qeyftdvpglrcodp.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %sub_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, 
eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (16 + (4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.load(in_ptr1 + (32 + (4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + (4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 16), (64, 16, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [M], Original ATen: [aten.mm] extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, sum_1, neg, exp], Original ATen: [aten.sub, aten.abs, aten.sum, aten.neg, aten.exp] stream0 = get_raw_stream(0) triton_per_fused_abs_exp_neg_sub_sum_0.run(buf2, buf0, 64, 16, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_2, buf2, buf3, 32, grid=grid(32), stream=stream0) return (buf3, buf0, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 16), (64, 16, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MinibatchDiscrimination1d(nn.Module):
    """1D Minibatch Discrimination Module as proposed in the paper
    `"Improved Techniques for Training GANs by Salimans et al."
    <https://arxiv.org/abs/1606.03498>`_

    Allows the Discriminator to easily detect mode collapse by augmenting
    the activations to the succeeding layer with side information that allows
    it to determine the 'closeness' of the minibatch examples with each other

    .. math :: M_i = T * f(x_{i})

    .. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}

    .. math :: o(x_{i})_b = \\sum_{j=1}^{n} c_b(x_{i}, x_{j}) \\in \\mathbb{R}

    .. math :: o(x_{i}) = \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B

    .. math :: o(X) \\in \\mathbb{R}^{n \\times B}

    This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})`

    where

    - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer
    - :math:`T \\in \\mathbb{R}^{A \\times B \\times C}` : Parameter Tensor for generating minibatch discrimination matrix

    Args:
        in_features (int): Features input corresponding to dimension :math:`A`
        out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B`
        intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C`

    Returns:
        A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size
    """

    def __init__(self, in_features, out_features, intermediate_features=16):
        super(MinibatchDiscrimination1d, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.intermediate_features = intermediate_features
        self.T = nn.Parameter(torch.Tensor(in_features, out_features,
            intermediate_features))
        nn.init.normal_(self.T)

    def forward(self, x):
        """Computes the output of the Minibatch Discrimination Layer

        Args:
            x (torch.Tensor): A Torch Tensor of dimensions :math:`(N, in_features)`

        Returns:
            2D Torch Tensor of size :math:`(N, in_features + out_features)` after applying Minibatch Discrimination
        """
        M = torch.mm(x, self.T.view(self.in_features, -1))
        M = M.view(-1, self.out_features, self.intermediate_features
            ).unsqueeze(0)
        M_t = M.permute(1, 0, 2, 3)
        out = torch.sum(torch.exp(-torch.abs(M - M_t).sum(3)), dim=0) - 1
        return torch.cat([x, out], 1)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
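Since the eager forward pass above is self-contained, a short smoke test can confirm the documented output size. This is a hypothetical sketch, not part of the torchgan source:

import torch

# Hypothetical smoke test: the layer should append out_features columns of
# closeness statistics to the input activations.
layer = MinibatchDiscrimination1d(in_features=4, out_features=4,
    intermediate_features=16)
x = torch.rand(4, 4)          # (N, in_features)
out = layer(x)                # (N, in_features + out_features)
assert out.shape == (4, 8)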
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_abs_exp_neg_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x5 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (r3 + 16 * x5), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 16), (64, 16, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_abs_exp_neg_sub_sum_0[grid(64)](buf2, buf0, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_2, buf2, buf3, 32, XBLOCK= 32, num_warps=1, num_stages=1) return buf3, buf0, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class MinibatchDiscrimination1dNew(nn.Module): """1D Minibatch Discrimination Module as proposed in the paper `"Improved Techniques for Training GANs by Salimans et. al." 
    <https://arxiv.org/abs/1606.03498>`_

    Allows the Discriminator to easily detect mode collapse by augmenting
    the activations to the succeeding layer with side information that allows
    it to determine the 'closeness' of the minibatch examples with each other

    .. math :: M_i = T * f(x_{i})

    .. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}

    .. math :: o(x_{i})_b = \\sum_{j=1}^{n} c_b(x_{i}, x_{j}) \\in \\mathbb{R}

    .. math :: o(x_{i}) = \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B

    .. math :: o(X) \\in \\mathbb{R}^{n \\times B}

    This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})`

    where

    - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer
    - :math:`T \\in \\mathbb{R}^{A \\times B \\times C}` : Parameter Tensor for generating minibatch discrimination matrix

    Args:
        in_features (int): Features input corresponding to dimension :math:`A`
        out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B`
        intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C`

    Returns:
        A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size
    """

    def __init__(self, in_features, out_features, intermediate_features=16):
        super(MinibatchDiscrimination1dNew, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.intermediate_features = intermediate_features
        self.T = nn.Parameter(torch.Tensor(in_features, out_features,
            intermediate_features))
        nn.init.normal_(self.T)

    def forward(self, input_0):
        primals_1 = self.T
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
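Because call() allocates CUDA buffers, the compiled module only accepts GPU inputs. A hedged way to validate it is to compare against the eager reference with shared parameters; this is a sketch assuming a CUDA device is available, not part of the generated file:

import torch

# Hypothetical consistency check between the eager and Inductor-compiled
# modules; both must live on the GPU because call() builds CUDA buffers.
torch.manual_seed(0)
eager = MinibatchDiscrimination1d(4, 4).cuda()
compiled = MinibatchDiscrimination1dNew(4, 4).cuda()
compiled.T.data.copy_(eager.T.data)   # use identical parameter values
x = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-5)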
torchgan/torchgan
MinibatchDiscrimination1d
false
16,612
[ "MIT" ]
1,300
f4139537ac2d3d8609d5aecc859a6fb797b107a1
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
GaussMembFunc
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4z/c4z5f5sypbvdzckwli2vgwoo37wqsejweryvcktm6nccmzns6jtu.py # Topologically Sorted Source Nodes: [sub, pow_1, neg, pow_2, mul, truediv, val], Original ATen: [aten.sub, aten.pow, aten.neg, aten.mul, aten.div, aten.exp] # Source node to ATen node mapping: # mul => mul # neg => neg # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # truediv => div # val => exp # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) triton_poi_fused_div_exp_mul_neg_pow_sub_0 = async_compile.triton('triton_poi_fused_div_exp_mul_neg_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_neg_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr2 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = -tmp4 tmp8 = tmp7 * tmp7 tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = tmp5 / tmp10 tmp12 = tl_math.exp(tmp11) tl.store(out_ptr0 + (x0), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, neg, pow_2, mul, truediv, val], Original ATen: [aten.sub, aten.pow, aten.neg, aten.mul, aten.div, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_mul_neg_pow_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch def _mk_param(val): """Make a torch parameter from a scalar value""" if isinstance(val, torch.Tensor): val = val.item() return torch.nn.Parameter(torch.tensor(val, dtype=torch.float)) class GaussMembFunc(torch.nn.Module): """ Gaussian membership functions, defined by two parameters: mu, the mean (center) sigma, the standard deviation. """ def __init__(self, mu, sigma): super(GaussMembFunc, self).__init__() self.register_parameter('mu', _mk_param(mu)) self.register_parameter('sigma', _mk_param(sigma)) def forward(self, x): val = torch.exp(-torch.pow(x - self.mu, 2) / (2 * self.sigma ** 2)) return val def pretty(self): return 'GaussMembFunc {} {}'.format(self.mu, self.sigma) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'mu': 4, 'sigma': 4}]
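As a quick numeric check of the formula in forward (a hypothetical usage sketch, not from the repository): with mu=0 and sigma=1, the membership value is 1 at the center and exp(-1/2) one standard deviation away.

import torch

# Hypothetical sketch: evaluate the Gaussian membership function at a few
# points and compare against hand-computed values.
mf = GaussMembFunc(mu=0.0, sigma=1.0)
vals = mf(torch.tensor([0.0, 1.0, 2.0]))   # exp(-(x - mu)^2 / (2 sigma^2))
torch.testing.assert_close(vals[0], torch.tensor(1.0))
torch.testing.assert_close(vals[1], torch.exp(torch.tensor(-0.5)))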
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = -tmp4 tmp8 = tmp7 * tmp7 tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = tmp5 / tmp10 tmp12 = tl_math.exp(tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_neg_pow_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3, buf0 def _mk_param(val): """Make a torch parameter from a scalar value""" if isinstance(val, torch.Tensor): val = val.item() return torch.nn.Parameter(torch.tensor(val, dtype=torch.float)) class GaussMembFuncNew(torch.nn.Module): """ Gaussian membership functions, defined by two parameters: mu, the mean (center) sigma, the standard deviation. """ def __init__(self, mu, sigma): super(GaussMembFuncNew, self).__init__() self.register_parameter('mu', _mk_param(mu)) self.register_parameter('sigma', _mk_param(sigma)) def pretty(self): return 'GaussMembFunc {} {}'.format(self.mu, self.sigma) def forward(self, input_0): primals_1 = self.mu primals_3 = self.sigma primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
trituenhantaoio/anfis-pytorch
GaussMembFunc
false
16,613
[ "MIT" ]
66
7a6bf123d69b550e46abeddd5b4a776243d43aa6
https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6
DisAlignCosineFastRCNNOutputLayers
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] # Source node to ATen node mapping: # inputs => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fk/cfk3au6bpii4t2zk7fuswzhjd4lviqkrkh2zykiukyxmuklrwoxj.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ta/ctayfyl2z6cnkebmnknzla3kln7iz6yv2atmlaug4fqou3wrwrt6.py # Topologically Sorted Source Nodes: [aligned_scores], Original ATen: [aten.cat] # Source node to ATen node mapping: # aligned_scores => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %view], 1), kwargs = {}) triton_poi_fused_cat_2 = 
async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp7 = tl.load(in_ptr1 + (0)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp9 = tl.load(in_ptr2 + ((5*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tl.load(in_ptr3 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tl.load(in_ptr4 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp12 + tmp13 tmp15 = tmp6 * tmp14 tmp16 = 1.0 tmp17 = tmp16 - tmp6 tmp18 = tmp17 * tmp10 tmp19 = tmp15 + tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 5, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tl.load(in_ptr2 + (4 + (5*x1)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp8 * tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tl.where(tmp4, tmp21, tmp28) tl.store(out_ptr0 + (x2), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 20, grid=grid(20), stream=stream0) buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0), out=buf2) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_4 del primals_5 buf5 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [aligned_scores], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf4, primals_3, buf2, primals_6, primals_7, buf5, 20, grid=grid(20), stream=stream0) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proposal_deltas], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 del primals_9 return (buf5, buf6, primals_1, primals_2, primals_3, primals_6, primals_7, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn


def cat(tensors, dim=0):
    """
    Efficient version of torch.cat that avoids a copy if there is only
    a single element in a list
    """
    assert isinstance(tensors, (list, tuple))
    if len(tensors) == 1:
        return tensors[0]
    return torch.cat(tensors, dim)


class NormalizedLinear(torch.nn.Module):
    """
    An advanced Linear layer which supports weight normalization or cosine
    normalization.
    """

    def __init__(self, in_features, out_features, bias=False, feat_norm=
        True, scale_mode='learn', scale_init=1.0):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.feat_norm = feat_norm
        self.scale_mode = scale_mode
        self.scale_init = scale_init
        self.weight = torch.nn.Parameter(torch.Tensor(out_features,
            in_features))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        if self.scale_mode == 'constant':
            self.scale = scale_init
        elif self.scale_mode == 'learn':
            self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
        else:
            raise NotImplementedError

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(
                self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, inputs):
        """
        Args:
            inputs (torch.Tensor): (N, C)

        Returns:
            output (torch.Tensor): (N, D)
        """
        if self.feat_norm:
            inputs = torch.nn.functional.normalize(inputs, dim=1)
        output = inputs.mm(torch.nn.functional.normalize(self.weight,
            dim=1).t())
        output = self.scale * output
        return output

    def extra_repr(self):
        s = 'in_features={in_features}, out_features={out_features}'
        if self.bias is None:
            s += ', bias=False'
        s += ', feat_norm={feat_norm}'
        s += ', scale_mode={scale_mode}'
        s += ', scale_init={scale_init}'
        return s.format(**self.__dict__)


class DisAlignCosineFastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
      (1) proposal-to-detection box regression deltas
      (2) classification scores
    """

    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
        box_dim=4, scale_mode='learn', scale_init=20.0):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox regression
            box_dim (int): the dimension of bounding boxes.
Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignCosineFastRCNNOutputLayers, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = NormalizedLinear(input_size, num_classes + 1, scale_mode=scale_mode, scale_init=scale_init) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: if layer.bias is not None: nn.init.constant_(layer.bias, 0) def forward(self, x): if x.dim() > 2: x = torch.flatten(x, start_dim=1) scores = self.cls_score(x) confidence = self.confidence_layer(x).sigmoid() scores_tmp = confidence * (scores[:, :-1] * self.logit_scale + self .logit_bias) scores_tmp = scores_tmp + (1 - confidence) * scores[:, :-1] aligned_scores = cat([scores_tmp, scores[:, -1].view(-1, 1)], dim=1) proposal_deltas = self.bbox_pred(x) return aligned_scores, proposal_deltas def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4} ]
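A hypothetical shape walk-through of the eager head (not part of the cvpods source; CPU is fine here): aligned_scores carries one column per foreground class plus background, and class-agnostic regression yields a single box per proposal.

import torch

# Hypothetical sketch of the forward contract of the eager head.
head = DisAlignCosineFastRCNNOutputLayers(input_size=4, num_classes=4,
    cls_agnostic_bbox_reg=True)
feats = torch.rand(4, 4)                  # (N, input_size)
aligned_scores, proposal_deltas = head(feats)
assert aligned_scores.shape == (4, 5)     # num_classes + 1 (background)
assert proposal_deltas.shape == (4, 4)    # 1 box per proposal * box_dim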
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp7 = tl.load(in_ptr1 + 0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp9 = tl.load(in_ptr2 + (5 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp10 * tmp11 tmp13 = tl.load(in_ptr4 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 + tmp13 tmp15 = tmp6 * tmp14 tmp16 = 1.0 tmp17 = tmp16 - tmp6 tmp18 = tmp17 * tmp10 tmp19 = tmp15 + tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp25 = tl.load(in_ptr2 + (4 + 5 
* x1), tmp22 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp8 * tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tl.where(tmp4, tmp21, tmp28) tl.store(out_ptr0 + x2, tmp29, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (5, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(20)](primals_2, buf1, 20, XBLOCK=32, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0), out=buf2) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_4 del primals_5 buf5 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0) del buf1 triton_poi_fused_cat_2[grid(20)](buf4, primals_3, buf2, primals_6, primals_7, buf5, 20, XBLOCK=32, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor( primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 del primals_9 return (buf5, buf6, primals_1, primals_2, primals_3, primals_6, primals_7, buf0, buf2, buf4) def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) class NormalizedLinear(torch.nn.Module): """ A advanced Linear layer which supports weight normalization or cosine normalization. 
""" def __init__(self, in_features, out_features, bias=False, feat_norm= True, scale_mode='learn', scale_init=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.feat_norm = feat_norm self.scale_mode = scale_mode self.scale_init = scale_init self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if self.scale_mode == 'constant': self.scale = scale_init elif self.scale_mode == 'learn': self.scale = torch.nn.Parameter(torch.ones(1) * scale_init) else: raise NotImplementedError def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight ) bound = 1 / math.sqrt(fan_in) torch.nn.init.uniform_(self.bias, -bound, bound) def forward(self, inputs): """ Args: inputs (torch.Tensor): (N, C) Return: output (torch.Tensor): (N, D) """ if self.feat_norm: inputs = torch.nn.functional.normalize(inputs, dim=1) output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1 ).t()) output = self.scale * output return output def extra_repr(self): s = 'in_features={in_features}, out_features={out_features}' if self.bias is None: s += ', bias=False' s += ', feat_norm={feat_norm}' s += ', scale_mode={scale_mode}' s += ', scale_init={scale_init}' return s.format(**self.__dict__) class DisAlignCosineFastRCNNOutputLayersNew(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, scale_mode='learn', scale_init=20.0): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int): number of foreground classes cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression box_dim (int): the dimension of bounding boxes. Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes """ super(DisAlignCosineFastRCNNOutputLayersNew, self).__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) self.cls_score = NormalizedLinear(input_size, num_classes + 1, scale_mode=scale_mode, scale_init=scale_init) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.logit_scale = nn.Parameter(torch.ones(num_classes)) self.logit_bias = nn.Parameter(torch.zeros(num_classes)) self.confidence_layer = nn.Linear(input_size, 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.confidence_layer.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]: if layer.bias is not None: nn.init.constant_(layer.bias, 0) def forward(self, input_0): primals_6 = self.logit_scale primals_7 = self.logit_bias primals_2 = self.cls_score.weight primals_3 = self.cls_score.scale primals_1 = self.bbox_pred.weight primals_9 = self.bbox_pred.bias primals_4 = self.confidence_layer.weight primals_5 = self.confidence_layer.bias primals_8 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
tonysy/cvpods
DisAlignCosineFastRCNNOutputLayers
false
16,615
[ "Apache-2.0" ]
548
e322d7842ca0e34b1ef6237ea6d350633efc793a
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
Policy
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] # Source node to ATen node mapping: # action_std => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {}) triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), 
(64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import copy
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def square(a):
    return torch.pow(a, 2.0)


class Policy(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(Policy, self).__init__()
        self.affine1 = nn.Linear(num_inputs, 64)
        self.affine2 = nn.Linear(64, 64)
        self.action_mean = nn.Linear(64, num_outputs)
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
        self.module_list_current = [self.affine1, self.affine2,
            self.action_mean, self.action_log_std]
        self.module_list_old = [None] * len(self.module_list_current)
        self.backup()

    def backup(self):
        for i in range(len(self.module_list_current)):
            self.module_list_old[i] = copy.deepcopy(
                self.module_list_current[i])

    def kl_div_p_q(self, p_mean, p_std, q_mean, q_std):
        """KL divergence D_{KL}[p(x)||q(x)] for a fully factorized Gaussian"""
        eps = 1e-08  # small stabilizer; assumed value, undefined in the original source
        numerator = square(p_mean - q_mean) + square(p_std) - square(q_std)
        denominator = 2.0 * square(q_std) + eps
        return torch.sum(numerator / denominator + torch.log(q_std) -
            torch.log(p_std))

    def kl_old_new(self):
        """Gives kld from old params to new params"""
        kl_div = self.kl_div_p_q(self.module_list_old[-2], self.
            module_list_old[-1], self.action_mean, self.action_log_std)
        return kl_div

    def entropy(self):
        """Gives entropy of current defined prob dist"""
        # np.log (not torch.log): torch.log does not accept Python floats
        ent = torch.sum(self.action_log_std + 0.5 * np.log(2.0 * np.pi *
            np.e))
        return ent

    def forward(self, x, old=False):
        if old:
            x = F.tanh(self.module_list_old[0](x))
            x = F.tanh(self.module_list_old[1](x))
            action_mean = self.module_list_old[2](x)
            action_log_std = self.module_list_old[3].expand_as(action_mean)
            action_std = torch.exp(action_log_std)
        else:
            x = F.tanh(self.affine1(x))
            x = F.tanh(self.affine2(x))
            action_mean = self.action_mean(x)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)
        return action_mean, action_log_std, action_std


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_outputs': 4}]
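The three returned tensors parameterize a diagonal Gaussian over actions. A hypothetical rollout sketch (not part of the original repo) makes the intended use concrete:

import torch
from torch.distributions import Normal

# Hypothetical sketch: sample one action per observation from the policy's
# diagonal Gaussian.
policy = Policy(num_inputs=4, num_outputs=4)
obs = torch.rand(4, 4)                              # batch of observations
action_mean, action_log_std, action_std = policy(obs)
action = Normal(action_mean, action_std).sample()   # one action per row
assert action.shape == (4, 4)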
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import copy
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
eps = 1e-08  # used by kl_div_p_q below (was undefined in the original)


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)


@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 64), (64, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
            reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf2
        triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
            (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0
        ), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf1, buf3, buf5, primals_6, primals_4


def square(a):
    return torch.pow(a, 2.0)


class PolicyNew(nn.Module):

    def __init__(self, num_inputs, num_outputs):
        super(PolicyNew, self).__init__()
        self.affine1 = nn.Linear(num_inputs, 64)
        self.affine2 = nn.Linear(64, 64)
        self.action_mean = nn.Linear(64, num_outputs)
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
        self.module_list_current = [self.affine1, self.affine2,
            self.action_mean, self.action_log_std]
        self.module_list_old = [None] * len(self.module_list_current)
        self.backup()

    def backup(self):
        for i in range(len(self.module_list_current)):
            self.module_list_old[i] = copy.deepcopy(self.module_list_current[i])

    def kl_div_p_q(self, p_mean, p_std, q_mean, q_std):
        """KL divergence D_{KL}[p(x)||q(x)] for a fully factorized Gaussian"""
        numerator = square(p_mean - q_mean) + square(p_std) - square(q_std)
        denominator = 2.0 * square(q_std) + eps
        return torch.sum(numerator / denominator + torch.log(q_std) -
            torch.log(p_std))

    def kl_old_new(self):
        """Gives kld from old params to new params"""
        kl_div = self.kl_div_p_q(self.module_list_old[-2],
            self.module_list_old[-1], self.action_mean, self.action_log_std)
        return kl_div

    def entropy(self):
        """Gives entropy of current defined prob dist"""
        # np.log, not torch.log: the argument is a Python float, not a Tensor
        ent = torch.sum(self.action_log_std + 0.5 * np.log(2.0 * np.pi * np.e))
        return ent

    def forward(self, input_0):
        primals_8 = self.action_log_std
        primals_1 = self.affine1.weight
        primals_2 = self.affine1.bias
        primals_4 = self.affine2.weight
        primals_5 = self.affine2.bias
        primals_6 = self.action_mean.weight
        primals_7 = self.action_mean.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0], output[1], output[2]
tpbarron/pytorch-ppo
Policy
false
16616
[ "MIT" ]
47
f73226865e34443f93dbec58939329c9278828e8
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
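A minimal smoke test for the Policy record above — a sketch assuming the Policy class from the python_code field is in scope; the shapes follow its get_inputs()/get_init_inputs() helpers:

import torch

policy = Policy(num_inputs=4, num_outputs=4)  # per get_init_inputs()
x = torch.rand(4, 4, 4, 4)                    # per get_inputs()
mean, log_std, std = policy(x)
assert mean.shape == (4, 4, 4, 4)             # the (1, 4) log-std is expanded to the mean's shape
assert torch.allclose(std, log_std.exp())     # std is simply exp(log_std)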
ActorCritic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] # Source node to ATen node mapping: # action_std => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {}) triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 64), (64, 1)) assert_size_stride(primals_10, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), 
torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp] triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_10 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0), buf5, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_9, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F


class ActorCritic(nn.Module):

    def __init__(self, num_inputs, num_outputs, hidden=64):
        super(ActorCritic, self).__init__()
        self.affine1 = nn.Linear(num_inputs, hidden)
        self.affine2 = nn.Linear(hidden, hidden)
        self.action_mean = nn.Linear(hidden, num_outputs)
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
        self.value_head = nn.Linear(hidden, 1)
        self.module_list_current = [self.affine1, self.affine2,
            self.action_mean, self.action_log_std, self.value_head]
        self.module_list_old = [None] * len(self.module_list_current)
        self.backup()

    def backup(self):
        for i in range(len(self.module_list_current)):
            self.module_list_old[i] = copy.deepcopy(self.module_list_current[i])

    def forward(self, x, old=False):
        if old:
            x = F.tanh(self.module_list_old[0](x))
            x = F.tanh(self.module_list_old[1](x))
            action_mean = self.module_list_old[2](x)
            action_log_std = self.module_list_old[3].expand_as(action_mean)
            action_std = torch.exp(action_log_std)
            value = self.module_list_old[4](x)
        else:
            x = F.tanh(self.affine1(x))
            x = F.tanh(self.affine2(x))
            action_mean = self.action_mean(x)
            action_log_std = self.action_log_std.expand_as(action_mean)
            action_std = torch.exp(action_log_std)
            value = self.value_head(x)
        return action_mean, action_log_std, action_std, value


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import copy import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 64), (64, 1)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_10 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 
1), 0 ), buf5, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_9, primals_6, primals_4 class ActorCriticNew(nn.Module): def __init__(self, num_inputs, num_outputs, hidden=64): super(ActorCriticNew, self).__init__() self.affine1 = nn.Linear(num_inputs, hidden) self.affine2 = nn.Linear(hidden, hidden) self.action_mean = nn.Linear(hidden, num_outputs) self.action_mean.weight.data.mul_(0.1) self.action_mean.bias.data.mul_(0.0) self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs)) self.value_head = nn.Linear(hidden, 1) self.module_list_current = [self.affine1, self.affine2, self. action_mean, self.action_log_std, self.value_head] self.module_list_old = [None] * len(self.module_list_current) self.backup() def backup(self): for i in range(len(self.module_list_current)): self.module_list_old[i] = copy.deepcopy(self.module_list_current[i] ) def forward(self, input_0): primals_8 = self.action_log_std primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_6 = self.action_mean.weight primals_7 = self.action_mean.bias primals_9 = self.value_head.weight primals_10 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1], output[2], output[3]
tpbarron/pytorch-ppo
ActorCritic
false
16617
[ "MIT" ]
47
f73226865e34443f93dbec58939329c9278828e8
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
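The same kind of sketch for the ActorCritic record, assuming the ActorCritic class above is in scope; relative to Policy it adds a value head on the shared trunk:

import torch

ac = ActorCritic(num_inputs=4, num_outputs=4)  # hidden defaults to 64
x = torch.rand(4, 4, 4, 4)                     # per get_inputs()
mean, log_std, std, value = ac(x)
assert mean.shape == (4, 4, 4, 4)
assert value.shape == (4, 4, 4, 1)             # value_head: Linear(hidden, 1)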
BellMembFunc
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/eb/cebvdz62qer4oz5rwrmgolin6f7xce7tjvzcl2p4jvitglhropzk.py # Topologically Sorted Source Nodes: [sub, truediv, dist, pow_2, add, reciprocal], Original ATen: [aten.sub, aten.div, aten.pow, aten.add, aten.reciprocal] # Source node to ATen node mapping: # add => add # dist => pow_1 # pow_2 => pow_2 # reciprocal => reciprocal # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %primals_3), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%pow_1, %primals_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) triton_poi_fused_add_div_pow_reciprocal_sub_0 = async_compile.triton('triton_poi_fused_add_div_pow_reciprocal_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_reciprocal_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_pow_reciprocal_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr3 + (0)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp3 / tmp5 tmp7 = tmp6 * tmp6 tmp10 = libdevice.pow(tmp7, tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tl.full([1], 1, tl.int32) tmp14 = tmp13 / tmp12 tl.store(out_ptr0 + (x0), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (), ()) assert_size_stride(primals_4, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv, dist, pow_2, add, reciprocal], Original ATen: [aten.sub, aten.div, aten.pow, aten.add, aten.reciprocal] stream0 = get_raw_stream(0) triton_poi_fused_add_div_pow_reciprocal_sub_0.run(primals_2, primals_1, primals_3, primals_4, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, primals_3, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


def _mk_param(val):
    """Make a torch parameter from a scalar value"""
    if isinstance(val, torch.Tensor):
        val = val.item()
    return torch.nn.Parameter(torch.tensor(val, dtype=torch.float))


class BellMembFunc(torch.nn.Module):
    """
    Generalised Bell membership function; defined by three parameters:
        a, the half-width (at the crossover point)
        b, controls the slope at the crossover point (which is -b/2a)
        c, the center point
    """

    def __init__(self, a, b, c):
        super(BellMembFunc, self).__init__()
        self.register_parameter('a', _mk_param(a))
        self.register_parameter('b', _mk_param(b))
        self.register_parameter('c', _mk_param(c))
        self.b.register_hook(BellMembFunc.b_log_hook)

    @staticmethod
    def b_log_hook(grad):
        """
        Possibility of a log(0) in the grad for b, giving a nan.
        Fix this by replacing any nan in the grad with ~0.
        """
        grad[torch.isnan(grad)] = 1e-09
        return grad

    def forward(self, x):
        dist = torch.pow((x - self.c) / self.a, 2)
        return torch.reciprocal(1 + torch.pow(dist, self.b))

    def pretty(self):
        return 'BellMembFunc {} {} {}'.format(self.a, self.b, self.c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'a': 4, 'b': 4, 'c': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_reciprocal_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr3 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp3 / tmp5 tmp7 = tmp6 * tmp6 tmp10 = libdevice.pow(tmp7, tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tl.full([1], 1, tl.int32) tmp14 = tmp13 / tmp12 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (), ()) assert_size_stride(primals_4, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_reciprocal_sub_0[grid(256)](primals_2, primals_1, primals_3, primals_4, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3, primals_4, buf0 def _mk_param(val): """Make a torch parameter from a scalar value""" if isinstance(val, torch.Tensor): val = val.item() return torch.nn.Parameter(torch.tensor(val, dtype=torch.float)) class BellMembFuncNew(torch.nn.Module): """ Generalised Bell membership function; defined by three parameters: a, the half-width (at the crossover point) b, controls the slope at the crossover point (which is -b/2a) c, the center point """ def __init__(self, a, b, c): super(BellMembFuncNew, self).__init__() self.register_parameter('a', _mk_param(a)) self.register_parameter('b', _mk_param(b)) self.register_parameter('c', _mk_param(c)) self.b.register_hook(BellMembFuncNew.b_log_hook) @staticmethod def b_log_hook(grad): """ Possibility of a log(0) in the grad for b, giving a nan. Fix this by replacing any nan in the grad with ~0. """ grad[torch.isnan(grad)] = 1e-09 return grad def pretty(self): return 'BellMembFunc {} {} {}'.format(self.a, self.b, self.c) def forward(self, input_0): primals_1 = self.a primals_3 = self.b primals_4 = self.c primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
trituenhantaoio/anfis-pytorch
BellMembFunc
false
16618
[ "MIT" ]
66
7a6bf123d69b550e46abeddd5b4a776243d43aa6
https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6
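A quick check of the BellMembFunc record, assuming the class above is in scope. The membership value is 1/(1 + ((x - c)/a)^(2b)), so it lies in (0, 1] and peaks at exactly 1 when x == c:

import torch

mf = BellMembFunc(a=4, b=4, c=4)      # per get_init_inputs()
y = mf(torch.rand(4, 4, 4, 4))        # per get_inputs()
assert torch.all((y > 0) & (y <= 1))  # bell membership is bounded
assert torch.isclose(mf(torch.tensor(4.0)), torch.tensor(1.0))  # peak at the center c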
DataEmbedding_wo_pos
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wf/cwfgpodivarq2gzz7nodlok35jybiut5djlrlgyaw23yzzih2tt7.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] # Source node to ATen node mapping: # pad => copy # Graph fragment: # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {}) triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = (yindex // 6) tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float("nan") tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + ((-20) + x2 + (4*y0) + (16*y1)), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + (4*y0) + (16*y1)), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + ((-4) + x2 + (4*y0) + (16*y1)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + (6*x2) + (24*y1)), tmp42, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/55/c55evpzrh3fywzeu3r57gxndcuhrqg3gekllkjsulnkdk5oceiwg.py # Topologically Sorted Source Nodes: [embedding, embedding_1, embedding_2, embedding_3, add, add_1, add_2, add_3, x_2], Original ATen: [aten.embedding, aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # embedding => embedding # embedding_1 => embedding_1 # embedding_2 => embedding_2 # embedding_3 => embedding_3 # x_2 => add_4 # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_4, %select), kwargs = {}) # %embedding_1 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_5, %select_1), kwargs = {}) # %embedding_2 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_6, %select_2), kwargs = {}) # %embedding_3 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_7, %select_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%embedding, %embedding_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %embedding_2), kwargs = {}) # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %embedding_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %add_3), kwargs = {}) triton_poi_fused_add_embedding_1 = async_compile.triton('triton_poi_fused_add_embedding_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_embedding_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([XBLOCK], 24, tl.int32) tmp4 = tmp2 + tmp3 tmp5 = tmp2 < 0 tmp6 = tl.where(tmp5, tmp4, tmp2) tl.device_assert(((0 <= tmp6) & (tmp6 < 24)) | ~(xmask), "index out of bounds: 0 <= tmp6 < 24") tmp8 = tl.load(in_ptr1 + (x1 + (4*tmp6)), xmask, eviction_policy='evict_last') tmp10 = tmp9.to(tl.int64) tmp11 = tl.full([XBLOCK], 7, tl.int32) tmp12 = tmp10 + tmp11 tmp13 = tmp10 < 0 tmp14 = tl.where(tmp13, tmp12, tmp10) tl.device_assert(((0 <= tmp14) & (tmp14 < 7)) | ~(xmask), "index out of bounds: 0 <= tmp14 < 7") tmp16 = tl.load(in_ptr2 + (x1 + (4*tmp14)), xmask, eviction_policy='evict_last') tmp17 = tmp8 + tmp16 tmp19 = tmp18.to(tl.int64) tmp20 = tl.full([XBLOCK], 32, tl.int32) tmp21 = tmp19 + tmp20 tmp22 = tmp19 < 0 tmp23 = tl.where(tmp22, tmp21, tmp19) tl.device_assert(((0 <= tmp23) & (tmp23 < 32)) | ~(xmask), "index out of bounds: 0 <= tmp23 < 32") tmp25 = tl.load(in_ptr3 + (x1 + (4*tmp23)), xmask, 
eviction_policy='evict_last') tmp26 = tmp17 + tmp25 tmp28 = tmp27.to(tl.int64) tmp29 = tl.full([XBLOCK], 13, tl.int32) tmp30 = tmp28 + tmp29 tmp31 = tmp28 < 0 tmp32 = tl.where(tmp31, tmp30, tmp28) tl.device_assert(((0 <= tmp32) & (tmp32 < 13)) | ~(xmask), "index out of bounds: 0 <= tmp32 < 13") tmp34 = tl.load(in_ptr4 + (x1 + (4*tmp32)), xmask, eviction_policy='evict_last') tmp35 = tmp26 + tmp34 tmp36 = 0.0 tmp37 = tmp35 + tmp36 tmp38 = tmp0 + tmp37 tl.store(in_out_ptr0 + (x3), tmp38, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (24, 4), (4, 1)) assert_size_stride(primals_5, (7, 4), (4, 1)) assert_size_stride(primals_6, (32, 4), (4, 1)) assert_size_stride(primals_7, (13, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_copy_0.run(primals_1, buf1, 24, 4, grid=grid(24, 4), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [embedding, embedding_1, embedding_2, embedding_3, add, add_1, add_2, add_3, x_2], Original ATen: [aten.embedding, aten.add] triton_poi_fused_add_embedding_1.run(buf3, primals_3, primals_4, primals_5, primals_6, primals_7, 64, grid=grid(64), stream=stream0) del primals_3 del primals_4 del primals_5 del primals_6 del primals_7 return (buf3, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((7, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((13, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class PositionalEmbedding(nn.Module):

    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False  # original wrote `require_grad`, a no-op typo
        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() *
            -(math.log(10000.0) / d_model)).exp()
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        return self.pe[:, :x.size(1)]


class TokenEmbedding(nn.Module):

    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular', bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, x):
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return x


class FixedEmbedding(nn.Module):

    def __init__(self, c_in, d_model):
        super(FixedEmbedding, self).__init__()
        w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False  # original wrote `require_grad`, a no-op typo
        position = torch.arange(0, c_in).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() *
            -(math.log(10000.0) / d_model)).exp()
        w[:, 0::2] = torch.sin(position * div_term)
        w[:, 1::2] = torch.cos(position * div_term)
        self.emb = nn.Embedding(c_in, d_model)
        self.emb.weight = nn.Parameter(w, requires_grad=False)

    def forward(self, x):
        return self.emb(x).detach()


class TemporalEmbedding(nn.Module):

    def __init__(self, d_model, embed_type='fixed', freq='h'):
        super(TemporalEmbedding, self).__init__()
        minute_size = 4
        hour_size = 24
        weekday_size = 7
        day_size = 32
        month_size = 13
        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
        if freq == 't':
            self.minute_embed = Embed(minute_size, d_model)
        self.hour_embed = Embed(hour_size, d_model)
        self.weekday_embed = Embed(weekday_size, d_model)
        self.day_embed = Embed(day_size, d_model)
        self.month_embed = Embed(month_size, d_model)

    def forward(self, x):
        x = x.long()
        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
            'minute_embed') else 0.0
        hour_x = self.hour_embed(x[:, :, 3])
        weekday_x = self.weekday_embed(x[:, :, 2])
        day_x = self.day_embed(x[:, :, 1])
        month_x = self.month_embed(x[:, :, 0])
        return hour_x + weekday_x + day_x + month_x + minute_x


class TimeFeatureEmbedding(nn.Module):

    def __init__(self, d_model, embed_type='timeF', freq='h'):
        super(TimeFeatureEmbedding, self).__init__()
        freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3,
            'b': 3}
        d_inp = freq_map[freq]
        self.embed = nn.Linear(d_inp, d_model, bias=False)

    def forward(self, x):
        return self.embed(x)


class DataEmbedding_wo_pos(nn.Module):

    def __init__(self, c_in, d_model, embed_type='fixed', freq='h',
            dropout=0.1):
        super(DataEmbedding_wo_pos, self).__init__()
        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
        self.position_embedding = PositionalEmbedding(d_model=d_model)
        self.temporal_embedding = TemporalEmbedding(d_model=d_model,
            embed_type=embed_type, freq=freq
            ) if embed_type != 'timeF' else TimeFeatureEmbedding(
            d_model=d_model, embed_type=embed_type, freq=freq)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, x_mark):
        x = self.value_embedding(x) + self.temporal_embedding(x_mark)
        return self.dropout(x)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'c_in': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_add_embedding_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([XBLOCK], 24, tl.int32) tmp4 = tmp2 + tmp3 tmp5 = tmp2 < 0 tmp6 = tl.where(tmp5, tmp4, tmp2) tl.device_assert((0 <= tmp6) & (tmp6 < 24) | ~xmask, 'index out of bounds: 0 <= tmp6 < 24') tmp8 = tl.load(in_ptr1 + (x1 + 4 * tmp6), 
xmask, eviction_policy= 'evict_last') tmp10 = tmp9.to(tl.int64) tmp11 = tl.full([XBLOCK], 7, tl.int32) tmp12 = tmp10 + tmp11 tmp13 = tmp10 < 0 tmp14 = tl.where(tmp13, tmp12, tmp10) tl.device_assert((0 <= tmp14) & (tmp14 < 7) | ~xmask, 'index out of bounds: 0 <= tmp14 < 7') tmp16 = tl.load(in_ptr2 + (x1 + 4 * tmp14), xmask, eviction_policy= 'evict_last') tmp17 = tmp8 + tmp16 tmp19 = tmp18.to(tl.int64) tmp20 = tl.full([XBLOCK], 32, tl.int32) tmp21 = tmp19 + tmp20 tmp22 = tmp19 < 0 tmp23 = tl.where(tmp22, tmp21, tmp19) tl.device_assert((0 <= tmp23) & (tmp23 < 32) | ~xmask, 'index out of bounds: 0 <= tmp23 < 32') tmp25 = tl.load(in_ptr3 + (x1 + 4 * tmp23), xmask, eviction_policy= 'evict_last') tmp26 = tmp17 + tmp25 tmp28 = tmp27.to(tl.int64) tmp29 = tl.full([XBLOCK], 13, tl.int32) tmp30 = tmp28 + tmp29 tmp31 = tmp28 < 0 tmp32 = tl.where(tmp31, tmp30, tmp28) tl.device_assert((0 <= tmp32) & (tmp32 < 13) | ~xmask, 'index out of bounds: 0 <= tmp32 < 13') tmp34 = tl.load(in_ptr4 + (x1 + 4 * tmp32), xmask, eviction_policy= 'evict_last') tmp35 = tmp26 + tmp34 tmp36 = 0.0 tmp37 = tmp35 + tmp36 tmp38 = tmp0 + tmp37 tl.store(in_out_ptr0 + x3, tmp38, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (24, 4), (4, 1)) assert_size_stride(primals_5, (7, 4), (4, 1)) assert_size_stride(primals_6, (32, 4), (4, 1)) assert_size_stride(primals_7, (13, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0) del buf2 triton_poi_fused_add_embedding_1[grid(64)](buf3, primals_3, primals_4, primals_5, primals_6, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 del primals_4 del primals_5 del primals_6 del primals_7 return buf3, primals_2, buf1 class PositionalEmbedding(nn.Module): def __init__(self, d_model, max_len=5000): super(PositionalEmbedding, self).__init__() pe = torch.zeros(max_len, d_model).float() pe.require_grad = False position = torch.arange(0, max_len).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): return self.pe[:, :x.size(1)] class TokenEmbedding(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbedding, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x): x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) return x class FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(FixedEmbedding, self).__init__() w = 
torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class TemporalEmbedding(nn.Module): def __init__(self, d_model, embed_type='fixed', freq='h'): super(TemporalEmbedding, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_model) self.hour_embed = Embed(hour_size, d_model) self.weekday_embed = Embed(weekday_size, d_model) self.day_embed = Embed(day_size, d_model) self.month_embed = Embed(month_size, d_model) def forward(self, x): x = x.long() minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.0 hour_x = self.hour_embed(x[:, :, 3]) weekday_x = self.weekday_embed(x[:, :, 2]) day_x = self.day_embed(x[:, :, 1]) month_x = self.month_embed(x[:, :, 0]) return hour_x + weekday_x + day_x + month_x + minute_x class TimeFeatureEmbedding(nn.Module): def __init__(self, d_model, embed_type='timeF', freq='h'): super(TimeFeatureEmbedding, self).__init__() freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3} d_inp = freq_map[freq] self.embed = nn.Linear(d_inp, d_model, bias=False) def forward(self, x): return self.embed(x) class DataEmbedding_wo_posNew(nn.Module): def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1 ): super(DataEmbedding_wo_posNew, self).__init__() self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) self.position_embedding = PositionalEmbedding(d_model=d_model) self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq ) if embed_type != 'timeF' else TimeFeatureEmbedding(d_model= d_model, embed_type=embed_type, freq=freq) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0, input_1): primals_2 = self.value_embedding.tokenConv.weight primals_4 = self.temporal_embedding.hour_embed.emb.weight primals_5 = self.temporal_embedding.weekday_embed.emb.weight primals_6 = self.temporal_embedding.day_embed.emb.weight primals_7 = self.temporal_embedding.month_embed.emb.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
thuml/Autoformer
DataEmbedding_wo_pos
false
16619
[ "MIT" ]
263
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
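A shape-level sketch for the DataEmbedding_wo_pos record, assuming the classes above are in scope. x_mark is random in [0, 1), so .long() maps every calendar index to 0, which is a valid row in all the embedding tables:

import torch

emb = DataEmbedding_wo_pos(c_in=4, d_model=4)  # per get_init_inputs()
emb.eval()  # disable dropout so the check is deterministic
x, x_mark = torch.rand(4, 4, 4), torch.rand(4, 4, 4)  # per get_inputs()
out = emb(x, x_mark)  # token (conv1d) embedding + fixed temporal embedding
assert out.shape == (4, 4, 4)  # (batch, seq_len, d_model)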
UpsampleConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # input_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * 
tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1)) return (buf1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def l2normalize(v, esp=1e-08): return v / (v.norm() + esp) def sn_weight(weight, u, height, n_power_iterations): weight.requires_grad_(False) for _ in range(n_power_iterations): v = l2normalize(torch.mv(weight.view(height, -1).t(), u)) u = l2normalize(torch.mv(weight.view(height, -1), v)) weight.requires_grad_(True) sigma = u.dot(weight.view(height, -1).mv(v)) return torch.div(weight, sigma), u def get_nonlinearity(nonlinearity=None): if not nonlinearity: pass elif callable(nonlinearity): if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) elif hasattr(nn, nonlinearity): nonlinearity = getattr(nn, nonlinearity) if nonlinearity == 'LeakyReLU': nonlinearity = nonlinearity(0.02, inplace=True) else: nonlinearity = nonlinearity() elif hasattr(nn.functional, nonlinearity): nonlinearity = getattr(nn.functional, nonlinearity) else: raise ValueError(nonlinearity) return nonlinearity class SNConv2d(nn.Conv2d): def __init__(self, *args, n_power_iterations=1, **kwargs): super(SNConv2d, self).__init__(*args, **kwargs) self.n_power_iterations = n_power_iterations self.height = self.weight.shape[0] self.register_buffer('u', l2normalize(self.weight.new_empty(self. height).normal_(0, 1))) def forward(self, input): w_sn, self.u = sn_weight(self.weight, self.u, self.height, self. n_power_iterations) return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups) class UpsampleConv(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix= '', spectral_norm=False): super(UpsampleConv, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = prefix + '_usc' models.add_module(name + '_up', nn.Upsample(scale_factor=2)) models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias= False)) if nonlinearity: models.add_module('{}_{}'.format(name, nonlinearity.__class__. __name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1)) return buf1, primals_2, buf0 def l2normalize(v, esp=1e-08): return v / (v.norm() + esp) def sn_weight(weight, u, height, n_power_iterations): weight.requires_grad_(False) for _ in range(n_power_iterations): v = l2normalize(torch.mv(weight.view(height, -1).t(), u)) u = l2normalize(torch.mv(weight.view(height, -1), v)) weight.requires_grad_(True) sigma = u.dot(weight.view(height, -1).mv(v)) return torch.div(weight, sigma), u def get_nonlinearity(nonlinearity=None): if not nonlinearity: pass elif callable(nonlinearity): if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) elif hasattr(nn, nonlinearity): nonlinearity = getattr(nn, nonlinearity) if nonlinearity == 'LeakyReLU': nonlinearity = nonlinearity(0.02, inplace=True) else: nonlinearity = nonlinearity() elif hasattr(nn.functional, nonlinearity): nonlinearity = getattr(nn.functional, nonlinearity) else: raise ValueError(nonlinearity) return nonlinearity class SNConv2d(nn.Conv2d): def __init__(self, *args, n_power_iterations=1, **kwargs): super(SNConv2d, self).__init__(*args, **kwargs) self.n_power_iterations = n_power_iterations self.height = self.weight.shape[0] self.register_buffer('u', l2normalize(self.weight.new_empty(self. height).normal_(0, 1))) def forward(self, input): w_sn, self.u = sn_weight(self.weight, self.u, self.height, self. 
n_power_iterations) return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups) class UpsampleConvNew(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix= '', spectral_norm=False): super(UpsampleConvNew, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = prefix + '_usc' models.add_module(name + '_up', nn.Upsample(scale_factor=2)) models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias= False)) if nonlinearity: models.add_module('{}_{}'.format(name, nonlinearity.__class__. __name__), nonlinearity) self.models = models def forward(self, input_0): primals_1 = self.models._usc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
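A minimal usage sketch for the generated wrapper above (a sketch under stated assumptions, not part of the record): it assumes a CUDA device, since the generated call() allocates CUDA buffers and launches Triton kernels, and it builds the module with the arguments from get_init_inputs().

import torch

# Hypothetical usage of UpsampleConvNew defined above; assumes a CUDA device.
# call() asserts the exact (4, 4, 4, 4) shape and strides of both tensors.
model = UpsampleConvNew(dim_in=4, dim_out=4, f_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = model(x)
print(y.shape)  # torch.Size([4, 4, 7, 7]): 4x4 -> 8x8 upsample, then 4x4 conv with padding 1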
tsirif/cortex
UpsampleConv
false
16,620
[ "BSD-3-Clause" ]
109
2837b220f9fb73279df3815bb18b274106412c08
https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08
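The fused _unsafe_index kernel in the record above is nearest-neighbor 2x upsampling: output pixel (y, x) reads input pixel (int(y * 0.5), int(x * 0.5)), which is what tmp4 and tmp8 compute. A CPU-only eager sketch of that index arithmetic, checked against nn.Upsample (illustrative only, not part of the record):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
ys = torch.arange(8)
xs = torch.arange(8)
src_y = (ys.float() * 0.5).to(torch.int64)  # tmp4: truncating cast of y * 0.5
src_x = (xs.float() * 0.5).to(torch.int64)  # tmp8: truncating cast of x * 0.5
manual = x[:, :, src_y[:, None], src_x[None, :]]  # gather, like the tl.load
reference = nn.Upsample(scale_factor=2)(x)        # default mode='nearest'
assert torch.equal(manual, reference)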
DQN_RAM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/oa/coaoyy2tzwhkubpw5yl7y66o2j6ncc2opezn233rb4fu2ccncu3h.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = 
async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (18, 64), (64, 1)) assert_size_stride(primals_9, (18, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 16384, grid=grid(16384), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically 
Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf8, 8192, grid=grid(8192), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf4 # reuse buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf7, 4096, grid=grid(4096), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 18), (18, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 18), (1, 64), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf6, (4, 4, 4, 18), (288, 72, 18, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(buf5, (64, 64), (64, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((18, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class DQN_RAM(nn.Module): def __init__(self, in_features=4, num_actions=18): """ Initialize a deep Q-learning network for testing the algorithm. in_features: number of input features. num_actions: number of action-values to output, in one-to-one correspondence with the actions in the game. """ super(DQN_RAM, self).__init__() self.fc1 = nn.Linear(in_features, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, num_actions) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) return self.fc4(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (18, 64), (64, 1)) assert_size_stride(primals_9, (18,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), 
out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf8, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf5, primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 18), (18, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 18), (1, 64), 0 ), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 18), (288, 72, 18, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf5, (64, 64), (64, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class DQN_RAMNew(nn.Module): def __init__(self, in_features=4, num_actions=18): """ Initialize a deep Q-learning network for testing algorithm in_features: number of features of input. num_actions: number of action-value to output, one-to-one correspondence to action in game. """ super(DQN_RAMNew, self).__init__() self.fc1 = nn.Linear(in_features, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, num_actions) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
transedward/pytoch-dqn
DQN_RAM
false
16,621
[ "MIT" ]
358
1ffda6f3724b3bb37c3195b09b651b1682d4d4fd
https://github.com/transedward/pytoch-dqn/tree/1ffda6f3724b3bb37c3195b09b651b1682d4d4fd
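All three fused kernels in the DQN_RAM record share one pattern at widths 256, 128, and 64: add the layer bias to the matmul result, apply ReLU in place, and store the boolean mask relu(x) <= 0 that aten.threshold_backward later uses to zero gradients for inactive units. An eager-mode sketch of that pattern (illustrative only):

import torch

def fused_relu_threshold_backward(acc: torch.Tensor, bias: torch.Tensor):
    out = torch.relu(acc + bias)  # tmp4 = maximum(0, mm_result + bias)
    mask = out <= 0.0             # tmp6, saved for the backward pass
    return out, mask

acc = torch.randn(64, 256)  # stands in for the (64, 256) extern_kernels.mm output
bias = torch.randn(256)
out, mask = fused_relu_threshold_backward(acc, bias)
assert torch.equal(mask, out == 0)  # ReLU output is never negative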
MySimpleNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nu/cnuuaznpt4szfn74bn46qfjkdypvlkfa5x44ywjpperdjt2a66rj.py # Topologically Sorted Source Nodes: [X], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # X => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py # Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # X_2 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z5/cz5xs7y3thsep5yn6qoths757rduuevog6mtea3nqr4nwnh2olnx.py # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # X_3 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), 
kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = (xindex // 32) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nv/cnvo7i3x3dm4mdtrcmoddo2p4odl6hgahimnieftjxkqwe7ehw54.py # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # X_3 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = (xindex // 32) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 10), (10, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) # Topologically Sorted Source Nodes: [X], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 640, grid=grid(640), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 
0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 128, grid=grid(128), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [X_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 128, grid=grid(128), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class MySimpleNet(nn.Module): """ Very simple 2-layer net, slightly adapted from the docs: https://skorch.readthedocs.io/en/stable/user/quickstart.html """ def __init__(self, num_in, num_feat, num_hidden=10, nonlin=F.relu): super(MySimpleNet, self).__init__() self.dense0 = nn.Linear(num_in, num_hidden) self.nonlin = nonlin self.dropout = nn.Dropout(0.5) self.dense1 = nn.Linear(num_hidden, num_feat) self.output = nn.Linear(num_feat, 2) def forward(self, X, **kwargs): X = self.nonlin(self.dense0(X)) X = self.dropout(X) X = F.relu(self.dense1(X)) X = F.softmax(self.output(X), dim=1) return X def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in': 4, 'num_feat': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, 
(4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 10), (10, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf8, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_2[grid(128)](buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class MySimpleNetNew(nn.Module): """ Very simple 2-layer net, slightly adapted from the docs: https://skorch.readthedocs.io/en/stable/user/quickstart.html """ def __init__(self, num_in, num_feat, num_hidden=10, nonlin=F.relu): super(MySimpleNetNew, self).__init__() self.dense0 = nn.Linear(num_in, num_hidden) self.nonlin = nonlin self.dropout = nn.Dropout(0.5) self.dense1 = nn.Linear(num_hidden, num_feat) self.output = nn.Linear(num_feat, 2) def forward(self, input_0): primals_1 = self.dense0.weight primals_2 = self.dense0.bias primals_4 = self.dense1.weight primals_5 = self.dense1.bias primals_6 = self.output.weight primals_7 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
trituenhantaoio/anfis-pytorch
MySimpleNet
false
16,622
[ "MIT" ]
66
7a6bf123d69b550e46abeddd5b4a776243d43aa6
https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6
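The last two kernels of the MySimpleNet record split a numerically stable softmax over dim 1 into two passes: triton_poi_fused__softmax_2 subtracts the per-slice maximum and exponentiates, and triton_poi_fused__softmax_3 divides by the sum. The four loads at offsets 0, 8, 16, 24 walk dim 1 of the (4, 4, 4, 2) logits, whose stride along that dim is 8. An eager sketch of the two passes (illustrative only):

import torch

x = torch.randn(4, 4, 4, 2)                 # shape produced by the final addmm
shifted = x - x.amax(dim=1, keepdim=True)   # pass 1: triton_poi_fused__softmax_2
expd = shifted.exp()
out = expd / expd.sum(dim=1, keepdim=True)  # pass 2: triton_poi_fused__softmax_3
assert torch.allclose(out, torch.softmax(x, dim=1))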
MeanPoolConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l3/cl3qgtljwm55hj7prrlq32vnxhqj5elf2qeptwkrprrhumnm7twn.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # input_1 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [2, 2], [2, 2], [0, 0], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + 
(8*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) return (buf1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def l2normalize(v, esp=1e-08): return v / (v.norm() + esp) def sn_weight(weight, u, height, n_power_iterations): weight.requires_grad_(False) for _ in range(n_power_iterations): v = l2normalize(torch.mv(weight.view(height, -1).t(), u)) u = l2normalize(torch.mv(weight.view(height, -1), v)) weight.requires_grad_(True) sigma = u.dot(weight.view(height, -1).mv(v)) return torch.div(weight, sigma), u def get_nonlinearity(nonlinearity=None): if not nonlinearity: pass elif callable(nonlinearity): if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) elif hasattr(nn, nonlinearity): nonlinearity = getattr(nn, nonlinearity) if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) else: nonlinearity = nonlinearity() elif hasattr(nn.functional, nonlinearity): nonlinearity = getattr(nn.functional, nonlinearity) else: raise ValueError(nonlinearity) return nonlinearity class SNConv2d(nn.Conv2d): def __init__(self, *args, n_power_iterations=1, **kwargs): super(SNConv2d, self).__init__(*args, **kwargs) self.n_power_iterations = n_power_iterations self.height = self.weight.shape[0] self.register_buffer('u', l2normalize(self.weight.new_empty(self. height).normal_(0, 1))) def forward(self, input): w_sn, self.u = sn_weight(self.weight, self.u, self.height, self. n_power_iterations) return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups) class MeanPoolConv(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix= '', spectral_norm=False): super(MeanPoolConv, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = 'mpc' + prefix models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad =False)) models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias= False)) if nonlinearity: models.add_module('{}_{}'.format(name, nonlinearity.__class__. __name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
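A minimal eager-mode smoke test for the module above, using its own get_inputs/get_init_inputs helpers (a sketch assuming CPU execution is acceptable): AvgPool2d(2) halves the 4x4 input to 2x2, and the 4x4 kernel with padding 1 then collapses it to a single spatial position, matching the (4, 4, 1, 1) shape asserted by the compiled call below.

import torch

args, kwargs = get_init_inputs()
module = MeanPoolConv(*args, **kwargs)
x, = get_inputs()
y = module(x)
# pool: 4x4 -> 2x2; conv with k=4, stride=1, pad=1: (2 + 2*1 - 4) + 1 = 1
assert y.shape == (4, 4, 1, 1)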
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) return buf1, primals_2, buf0 def l2normalize(v, esp=1e-08): return v / (v.norm() + esp) def sn_weight(weight, u, height, n_power_iterations): weight.requires_grad_(False) for _ in range(n_power_iterations): v = l2normalize(torch.mv(weight.view(height, -1).t(), u)) u = l2normalize(torch.mv(weight.view(height, -1), v)) weight.requires_grad_(True) sigma = u.dot(weight.view(height, -1).mv(v)) return torch.div(weight, sigma), u def get_nonlinearity(nonlinearity=None): if not nonlinearity: pass elif callable(nonlinearity): if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) elif hasattr(nn, nonlinearity): nonlinearity = getattr(nn, nonlinearity) if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) else: nonlinearity = nonlinearity() elif hasattr(nn.functional, nonlinearity): nonlinearity = getattr(nn.functional, nonlinearity) else: raise ValueError(nonlinearity) return nonlinearity class SNConv2d(nn.Conv2d): def __init__(self, *args, n_power_iterations=1, **kwargs): super(SNConv2d, self).__init__(*args, **kwargs) self.n_power_iterations = n_power_iterations self.height = self.weight.shape[0] self.register_buffer('u', l2normalize(self.weight.new_empty(self. height).normal_(0, 1))) def forward(self, input): w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations) return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups) class MeanPoolConvNew(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix= '', spectral_norm=False): super(MeanPoolConvNew, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = 'mpc' + prefix models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad =False)) models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias= False)) if nonlinearity: models.add_module('{}_{}'.format(name, nonlinearity.__class__. __name__), nonlinearity) self.models = models def forward(self, input_0): primals_1 = input_0 primals_2 = self.models.mpc.weight output = call([primals_1, primals_2]) return output[0]
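A usage sketch for MeanPoolConvNew, assuming a CUDA device (call() pins device 0 and asserts the exact (4, 4, 4, 4) contiguous strides, so other shapes will fail those checks). The reference recomputes the eager pipeline, mean pooling followed by the bias-free convolution, that call() implements: call() pools its first argument and uses its second as the convolution weight, per the asserts and saved tensors above.

import torch
import torch.nn.functional as F

m_new = MeanPoolConvNew(dim_in=4, dim_out=4, f_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = m_new(x)
assert y.shape == (4, 4, 1, 1)
ref = F.conv2d(F.avg_pool2d(x, 2), m_new.models.mpc.weight, padding=1)
assert torch.allclose(y, ref, atol=1e-5)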
tsirif/cortex
MeanPoolConv
false
16623
[ "BSD-3-Clause" ]
109
2837b220f9fb73279df3815bb18b274106412c08
https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08
ECB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/2v/c2vp4wevd4mmk6p3qeilou7zqojjaarvm3pedrgkmrhhjbggkpqu.py # Topologically Sorted Source Nodes: [RK], Original ATen: [aten.convolution] # Source node to ATen node mapping: # RK => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %permute, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qv/cqvr6sl23upg6qmriv4ttfkwdalh2tm5o52kvelb4o7ra7lg6rli.py # Topologically Sorted Source Nodes: [ones, RB], Original ATen: [aten.ones, aten.mul] # Source node to ATen node mapping: # RB => mul # ones => full_default # Graph fragment: # %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 3, 3], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %view), kwargs = {}) triton_poi_fused_mul_ones_1 = async_compile.triton('triton_poi_fused_mul_ones_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ones_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_ones_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 9) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 * tmp0 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xu/cxuj47chxotact4pqfiotmxhglblpldmsey7mo3ep7qmqeqz3prc.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_2, %select_23, 0, 2), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 9) x0 = xindex % 9 x2 = xindex tmp3 = tl.load(in_ptr0 + (2)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp5 = tl.load(in_ptr1 + (18 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp12 = tl.load(in_ptr1 + (9 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (0)) tmp18 = tl.broadcast_to(tmp17, [XBLOCK]) tmp19 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp6 = tmp4 * tmp5 tmp7 = tl.full([1], 1, tl.int32) tmp8 = tmp1 == tmp7 tmp9 = tmp0 == tmp7 tmp13 = tmp11 * tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = tmp7 == tmp14 tmp16 = tmp0 == tmp14 tmp20 = tmp18 * tmp19 tmp21 = 0.0 tmp22 = tl.where(tmp16, tmp20, tmp21) tmp23 = tl.where(tmp15, tmp22, tmp21) tmp24 = tl.where(tmp9, tmp13, tmp23) tmp25 = tmp1 == tmp14 tmp26 = tl.where(tmp25, tmp22, tmp21) tmp27 = tl.where(tmp8, tmp24, tmp26) tmp28 = tl.where(tmp2, tmp6, tmp27) tl.store(out_ptr0 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c7/cc7ihqsgq5rotyl756evxcxvtv6777phxmntrhx2c7rmoidrzynb.py # Topologically Sorted Source Nodes: [k1], Original ATen: [aten.zeros] # Source node to ATen node mapping: # k1 => full_default_1 # Graph fragment: # %full_default_1 : [num_users=7] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 3, 3], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int, %select_1, 0, 0), kwargs = {}) # %select_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_1, %select_scatter_default, 0, 0), kwargs = {}) # %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_1, %select_11, 0, 1), kwargs = {}) # %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %select_scatter_default_2, 0, 1), kwargs = {}) # %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_2, %select_23, 0, 2), kwargs = {}) # 
%select_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %select_scatter_default_4, 0, 2), kwargs = {}) triton_poi_fused_zeros_3 = async_compile.triton('triton_poi_fused_zeros_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 36) x3 = xindex % 36 x1 = (xindex // 9) % 4 x0 = xindex % 9 x5 = xindex tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (9 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.full([1], 1, tl.int32) tmp5 = tmp0 == tmp4 tmp6 = x1 tmp7 = tmp6 == tmp4 tmp11 = tmp9 * tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = tmp4 == tmp12 tmp14 = tmp6 == tmp12 tmp18 = tmp16 * tmp17 tmp19 = 0.0 tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tl.where(tmp13, tmp20, tmp19) tmp22 = tl.where(tmp7, tmp11, tmp21) tmp23 = tmp0 == tmp12 tmp24 = tl.where(tmp23, tmp20, tmp19) tmp25 = tl.where(tmp5, tmp22, tmp24) tmp26 = tl.where(tmp2, tmp3, tmp25) tl.store(out_ptr0 + (x5), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5x/c5xceeeyore456ayu22cfnjvtsmgwjyflxey6ofmssylko3q6lfr.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_3, %select_35, 0, 3), kwargs = {}) # %select_scatter_default_7 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %select_scatter_default_6, 0, 3), kwargs = {}) triton_poi_fused_4 = 
async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 36) x1 = (xindex // 9) % 4 x0 = xindex % 9 x4 = xindex % 36 x5 = xindex tmp5 = tl.load(in_ptr0 + (3)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp7 = tl.load(in_ptr1 + (27 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (108 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (x5), xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tmp4 = tmp3 == tmp1 tmp8 = tmp6 * tmp7 tmp10 = tl.where(tmp4, tmp8, tmp9) tmp12 = tl.where(tmp2, tmp10, tmp11) tl.store(out_ptr0 + (x5), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ki/cki7wq63h3z7dozphgay3ajw62gsf4cu7vseimq4nhjx4q3z7wfq.py # Topologically Sorted Source Nodes: [add_4, add_5, add_6, RK_4], Original ATen: [aten.add] # Source node to ATen node mapping: # RK_4 => add_7 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %convolution_2), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %convolution_4), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %convolution_6), kwargs = {}) triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_out_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp5 = tl.load(in_ptr2 + (x0), xmask) tmp7 = tl.load(in_ptr3 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tl.store(in_out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cf/ccfkblyhfc7n6sy4opvjgvuymz54pzopsbopwb5m37uo5c26imhp.py # Topologically Sorted Source Nodes: [RB_1, RB_3, RB_5, RB_7, add_8, add_9, add_10, RB_8], Original ATen: [aten.add] # Source node to ATen node mapping: # RB_1 => add # RB_3 => add_1 # RB_5 => add_2 # RB_7 => add_3 # RB_8 => add_11 # add_10 => add_10 # add_8 => add_8 # add_9 => add_9 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_6), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %primals_15), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %primals_20), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %add), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %add_1), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %add_2), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %add_3), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 
'*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask) tmp5 = tl.load(in_out_ptr0 + (x0), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask) tmp9 = tl.load(in_ptr4 + (x0), xmask) tmp10 = tl.load(in_ptr5 + (x0), xmask) tmp13 = tl.load(in_ptr6 + (x0), xmask) tmp14 = tl.load(in_ptr7 + (x0), xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tmp11 = tmp9 + tmp10 tmp12 = tmp8 + tmp11 tmp15 = tmp13 + tmp14 tmp16 = tmp12 + tmp15 tl.store(in_out_ptr0 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5m/c5m2qu7dz2ebkep5znwbczbqsg4mko372sguwxrfwe3e75rq7lvp.py # Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # y => convolution_8 # y_1 => gt, mul_7, where # Graph fragment: # %convolution_8 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_22, %add_7, %add_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %convolution_8), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_8, %mul_7), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_7 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_13, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_14, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_18, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_19, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_20, (4, ), (1, )) assert_size_stride(primals_21, (4, ), (1, )) assert_size_stride(primals_22, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_23, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [RK], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_3, buf0, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [RK], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1)) buf2 = 
empty_strided_cuda((1, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [ones, RB], Original ATen: [aten.ones, aten.mul] triton_poi_fused_mul_ones_1.run(primals_5, buf2, 36, grid=grid(36), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1)) buf4 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_8, primals_9, buf4, 36, grid=grid(36), stream=stream0) buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [k1], Original ATen: [aten.zeros] triton_poi_fused_zeros_3.run(buf4, primals_8, primals_9, buf5, 144, grid=grid(144), stream=stream0) buf6 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, primals_9, buf5, buf6, 144, grid=grid(144), stream=stream0) del primals_8 buf7 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [RK_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_7, buf7, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [RK_1], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 3, 3), (36, 9, 3, 1)) buf9 = reinterpret_tensor(buf4, (1, 4, 3, 3), (36, 9, 3, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [ones, RB_2], Original ATen: [aten.ones, aten.mul] triton_poi_fused_mul_ones_1.run(primals_11, buf9, 36, grid=grid(36), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (1, 4, 1, 1), (4, 1, 1, 1)) buf11 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_13, primals_14, buf11, 36, grid=grid(36), stream=stream0) buf12 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [k1], Original ATen: [aten.zeros] triton_poi_fused_zeros_3.run(buf11, primals_13, primals_14, buf12, 144, grid=grid(144), stream=stream0) buf13 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_13, primals_14, buf12, buf13, 144, grid=grid(144), stream=stream0) del primals_13 buf14 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [RK_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_12, buf14, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [RK_2], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf13, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 3, 3), (36, 9, 3, 1)) buf16 = reinterpret_tensor(buf11, (1, 4, 3, 3), (36, 9, 3, 1), 0); del buf11 # reuse # Topologically Sorted Source 
Nodes: [ones, RB_4], Original ATen: [aten.ones, aten.mul] triton_poi_fused_mul_ones_1.run(primals_16, buf16, 36, grid=grid(36), stream=stream0) del primals_16 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (1, 4, 1, 1), (4, 1, 1, 1)) buf18 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_18, primals_19, buf18, 36, grid=grid(36), stream=stream0) buf19 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [k1], Original ATen: [aten.zeros] triton_poi_fused_zeros_3.run(buf18, primals_18, primals_19, buf19, 144, grid=grid(144), stream=stream0) buf20 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_18, primals_19, buf19, buf20, 144, grid=grid(144), stream=stream0) del buf19 del primals_18 buf21 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [RK_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_17, buf21, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [RK_3], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf20, buf21, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 3, 3), (36, 9, 3, 1)) del buf21 buf23 = reinterpret_tensor(buf18, (1, 4, 3, 3), (36, 9, 3, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [ones, RB_6], Original ATen: [aten.ones, aten.mul] triton_poi_fused_mul_ones_1.run(primals_21, buf23, 36, grid=grid(36), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, buf20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (1, 4, 1, 1), (4, 1, 1, 1)) buf25 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [add_4, add_5, add_6, RK_4], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf25, primals_1, buf8, buf15, buf22, 144, grid=grid(144), stream=stream0) del buf15 del buf22 del buf8 del primals_1 buf26 = reinterpret_tensor(buf10, (4, ), (1, ), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [RB_1, RB_3, RB_5, RB_7, add_8, add_9, add_10, RB_8], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf26, primals_2, buf3, primals_6, primals_10, buf17, primals_15, buf24, primals_20, 4, grid=grid(4), stream=stream0) del buf17 del buf24 del buf3 del primals_10 del primals_15 del primals_2 del primals_20 del primals_6 # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(primals_22, buf25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_7.run(buf28, buf26, primals_23, buf29, 256, grid=grid(256), stream=stream0) del buf26 return (buf29, 
primals_4, primals_9, primals_14, primals_19, primals_22, primals_23, reinterpret_tensor(primals_3, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf2, buf6, reinterpret_tensor(primals_7, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf9, buf13, reinterpret_tensor(primals_12, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf16, buf20, reinterpret_tensor(primals_17, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf23, buf25, buf28, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
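The select_scatter chains implemented by triton_poi_fused_2, triton_poi_fused_zeros_3, and triton_poi_fused_4 above correspond to the per-channel loop k1[i, i, :, :] = tmp[i, 0, :, :] in SeqConv3x3.rep_params (defined below). A minimal sketch of that diagonal embedding in plain PyTorch, with random stand-ins for scale and mask and out_planes fixed at 4:

import torch

scale = torch.randn(4, 1, 1, 1)
mask = torch.randn(4, 1, 3, 3)
tmp = scale * mask
k1 = torch.zeros(4, 4, 3, 3)
for i in range(4):
    k1[i, i] = tmp[i, 0]          # the loop rep_params uses
k1_vec = torch.zeros(4, 4, 3, 3)
k1_vec[torch.arange(4), torch.arange(4)] = tmp[:, 0]  # loop-free equivalent
assert torch.equal(k1, k1_vec)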
import torch import torch.nn as nn import torch.nn.functional as F class SeqConv3x3(nn.Module): def __init__(self, seq_type, inp_planes, out_planes, depth_multiplier): super(SeqConv3x3, self).__init__() self.type = seq_type self.inp_planes = inp_planes self.out_planes = out_planes if self.type == 'conv1x1-conv3x3': self.mid_planes = int(out_planes * depth_multiplier) conv0 = torch.nn.Conv2d(self.inp_planes, self.mid_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias conv1 = torch.nn.Conv2d(self.mid_planes, self.out_planes, kernel_size=3) self.k1 = conv1.weight self.b1 = conv1.bias elif self.type == 'conv1x1-sobelx': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(scale) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(bias) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 0] = 1.0 self.mask[i, 0, 1, 0] = 2.0 self.mask[i, 0, 2, 0] = 1.0 self.mask[i, 0, 0, 2] = -1.0 self.mask[i, 0, 1, 2] = -2.0 self.mask[i, 0, 2, 2] = -1.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) elif self.type == 'conv1x1-sobely': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(torch.FloatTensor(scale)) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(torch.FloatTensor(bias)) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 0] = 1.0 self.mask[i, 0, 0, 1] = 2.0 self.mask[i, 0, 0, 2] = 1.0 self.mask[i, 0, 2, 0] = -1.0 self.mask[i, 0, 2, 1] = -2.0 self.mask[i, 0, 2, 2] = -1.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) elif self.type == 'conv1x1-laplacian': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(torch.FloatTensor(scale)) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(torch.FloatTensor(bias)) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 1] = 1.0 self.mask[i, 0, 1, 0] = 1.0 self.mask[i, 0, 1, 2] = 1.0 self.mask[i, 0, 2, 1] = 1.0 self.mask[i, 0, 1, 1] = -4.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) else: raise ValueError('the type of seqconv is not supported!') def forward(self, x): if self.type == 'conv1x1-conv3x3': y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) b0_pad = self.b0.view(1, -1, 1, 1) y0[:, :, 0:1, :] = b0_pad y0[:, :, -1:, :] = b0_pad y0[:, :, :, 0:1] = b0_pad y0[:, :, :, -1:] = b0_pad y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1) else: y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) b0_pad = self.b0.view(1, -1, 1, 1) y0[:, :, 0:1, :] = b0_pad y0[:, :, -1:, :] = b0_pad y0[:, :, :, 0:1] = b0_pad y0[:, :, :, -1:] = b0_pad y1 = F.conv2d(input=y0, weight=self.scale * 
self.mask, bias= self.bias, stride=1, groups=self.out_planes) return y1 def rep_params(self): device = self.k0.get_device() if device < 0: device = None if self.type == 'conv1x1-conv3x3': RK = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3)) RB = torch.ones(1, self.mid_planes, 3, 3, device=device ) * self.b0.view(1, -1, 1, 1) RB = F.conv2d(input=RB, weight=self.k1).view(-1) + self.b1 else: tmp = self.scale * self.mask k1 = torch.zeros((self.out_planes, self.out_planes, 3, 3), device=device) for i in range(self.out_planes): k1[i, i, :, :] = tmp[i, 0, :, :] b1 = self.bias RK = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3)) RB = torch.ones(1, self.out_planes, 3, 3, device=device ) * self.b0.view(1, -1, 1, 1) RB = F.conv2d(input=RB, weight=k1).view(-1) + b1 return RK, RB class ECB(nn.Module): def __init__(self, inp_planes, out_planes, depth_multiplier, act_type= 'prelu', with_idt=False): super(ECB, self).__init__() self.depth_multiplier = depth_multiplier self.inp_planes = inp_planes self.out_planes = out_planes self.act_type = act_type if with_idt and self.inp_planes == self.out_planes: self.with_idt = True else: self.with_idt = False self.conv3x3 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=3, padding=1) self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.inp_planes, self.out_planes, self.depth_multiplier) self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.inp_planes, self.out_planes, -1) self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.inp_planes, self.out_planes, -1) self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.inp_planes, self.out_planes, -1) if self.act_type == 'prelu': self.act = nn.PReLU(num_parameters=self.out_planes) elif self.act_type == 'relu': self.act = nn.ReLU(inplace=True) elif self.act_type == 'rrelu': self.act = nn.RReLU(lower=-0.05, upper=0.05) elif self.act_type == 'softplus': self.act = nn.Softplus() elif self.act_type == 'linear': pass else: raise ValueError('The type of activation is not supported!') def forward(self, x): if self.training: y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x ) + self.conv1x1_sby(x) + self.conv1x1_lpl(x) if self.with_idt: y += x else: RK, RB = self.rep_params() y = F.conv2d(input=x, weight=RK, bias=RB, stride=1, padding=1) if self.act_type != 'linear': y = self.act(y) return y def rep_params(self): K0, B0 = self.conv3x3.weight, self.conv3x3.bias K1, B1 = self.conv1x1_3x3.rep_params() K2, B2 = self.conv1x1_sbx.rep_params() K3, B3 = self.conv1x1_sby.rep_params() K4, B4 = self.conv1x1_lpl.rep_params() RK, RB = K0 + K1 + K2 + K3 + K4, B0 + B1 + B2 + B3 + B4 if self.with_idt: device = RK.get_device() if device < 0: device = None K_idt = torch.zeros(self.out_planes, self.out_planes, 3, 3, device=device) for i in range(self.out_planes): K_idt[i, i, 1, 1] = 1.0 B_idt = 0.0 RK, RB = RK + K_idt, RB + B_idt return RK, RB def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inp_planes': 4, 'out_planes': 4, 'depth_multiplier': 1}]
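A minimal check of the structural re-parameterization ECB.forward relies on: in training mode it sums the five branches, while in eval mode it runs the single fused convolution from rep_params(). A sketch assuming CPU execution and the default 'prelu' activation; the tolerance is loose to absorb float accumulation differences between the two paths.

import torch

torch.manual_seed(0)
args, kwargs = get_init_inputs()
ecb = ECB(*args, **kwargs)   # inp_planes=4, out_planes=4, depth_multiplier=1
x, = get_inputs()
ecb.train()
y_train = ecb(x)
ecb.eval()
with torch.no_grad():
    y_eval = ecb(x)
assert torch.allclose(y_train, y_eval, atol=1e-4)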
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_ones_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 9 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 * tmp0 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 9 x0 = xindex % 9 x2 = xindex tmp3 = tl.load(in_ptr0 + 2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp5 = tl.load(in_ptr1 + (18 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + 1) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp12 = tl.load(in_ptr1 + (9 + x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + 0) tmp18 = tl.broadcast_to(tmp17, [XBLOCK]) tmp19 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp6 = tmp4 * tmp5 tmp7 = tl.full([1], 1, tl.int32) tmp8 = tmp1 == tmp7 tmp9 = tmp0 == tmp7 tmp13 = tmp11 * tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = tmp7 == tmp14 tmp16 = tmp0 == tmp14 tmp20 = tmp18 * tmp19 tmp21 = 0.0 tmp22 = tl.where(tmp16, tmp20, tmp21) tmp23 = tl.where(tmp15, tmp22, tmp21) tmp24 = tl.where(tmp9, tmp13, tmp23) tmp25 = tmp1 == tmp14 tmp26 = tl.where(tmp25, tmp22, tmp21) tmp27 = tl.where(tmp8, tmp24, tmp26) tmp28 = tl.where(tmp2, tmp6, tmp27) tl.store(out_ptr0 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_zeros_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 36 x3 = xindex % 36 x1 = xindex // 9 % 4 x0 = xindex % 9 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (9 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp17 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.full([1], 1, tl.int32) tmp5 = tmp0 == tmp4 tmp6 = x1 tmp7 = tmp6 == tmp4 tmp11 = tmp9 * tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = tmp4 == tmp12 tmp14 = tmp6 == tmp12 tmp18 = tmp16 * tmp17 tmp19 = 0.0 tmp20 = 
tl.where(tmp14, tmp18, tmp19) tmp21 = tl.where(tmp13, tmp20, tmp19) tmp22 = tl.where(tmp7, tmp11, tmp21) tmp23 = tmp0 == tmp12 tmp24 = tl.where(tmp23, tmp20, tmp19) tmp25 = tl.where(tmp5, tmp22, tmp24) tmp26 = tl.where(tmp2, tmp3, tmp25) tl.store(out_ptr0 + x5, tmp26, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 36 x1 = xindex // 9 % 4 x0 = xindex % 9 x4 = xindex % 36 x5 = xindex tmp5 = tl.load(in_ptr0 + 3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp7 = tl.load(in_ptr1 + (27 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (108 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tmp4 = tmp3 == tmp1 tmp8 = tmp6 * tmp7 tmp10 = tl.where(tmp4, tmp8, tmp9) tmp12 = tl.where(tmp2, tmp10, tmp11) tl.store(out_ptr0 + x5, tmp12, xmask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask) tmp7 = tl.load(in_ptr3 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tl.store(in_out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp5 = tl.load(in_out_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask) tmp10 = tl.load(in_ptr5 + x0, xmask) tmp13 = tl.load(in_ptr6 + x0, xmask) tmp14 = tl.load(in_ptr7 + x0, xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tmp11 = tmp9 + tmp10 tmp12 = tmp8 + tmp11 tmp15 = tmp13 + tmp14 tmp16 = tmp12 + tmp15 tl.store(in_out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 3, 
3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_9, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_13, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_14, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_18, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_19, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_20, (4,), (1,)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_23, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(4, 4)](primals_3, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1)) buf2 = empty_strided_cuda((1, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_mul_ones_1[grid(36)](primals_5, buf2, 36, XBLOCK= 64, num_warps=1, num_stages=1) del primals_5 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1)) buf4 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused_2[grid(36)](primals_8, primals_9, buf4, 36, XBLOCK =64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_zeros_3[grid(144)](buf4, primals_8, primals_9, buf5, 144, XBLOCK=256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_4[grid(144)](primals_8, primals_9, buf5, buf6, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf7 = buf0 del buf0 triton_poi_fused_convolution_0[grid(4, 4)](primals_7, buf7, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf8 = extern_kernels.convolution(buf6, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 3, 3), (36, 9, 3, 1)) buf9 = reinterpret_tensor(buf4, (1, 4, 3, 3), (36, 9, 3, 1), 0) del buf4 triton_poi_fused_mul_ones_1[grid(36)](primals_11, buf9, 36, XBLOCK= 64, num_warps=1, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (1, 4, 1, 1), (4, 1, 1, 1)) buf11 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused_2[grid(36)](primals_13, primals_14, buf11, 36, XBLOCK=64, num_warps=1, num_stages=1) buf12 = buf5 del buf5 triton_poi_fused_zeros_3[grid(144)](buf11, primals_13, primals_14, buf12, 144, XBLOCK=256, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) 
triton_poi_fused_4[grid(144)](primals_13, primals_14, buf12, buf13, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf14 = buf7 del buf7 triton_poi_fused_convolution_0[grid(4, 4)](primals_12, buf14, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf15 = extern_kernels.convolution(buf13, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 3, 3), (36, 9, 3, 1)) buf16 = reinterpret_tensor(buf11, (1, 4, 3, 3), (36, 9, 3, 1), 0) del buf11 triton_poi_fused_mul_ones_1[grid(36)](primals_16, buf16, 36, XBLOCK =64, num_warps=1, num_stages=1) del primals_16 buf17 = extern_kernels.convolution(buf16, buf13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (1, 4, 1, 1), (4, 1, 1, 1)) buf18 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused_2[grid(36)](primals_18, primals_19, buf18, 36, XBLOCK=64, num_warps=1, num_stages=1) buf19 = buf12 del buf12 triton_poi_fused_zeros_3[grid(144)](buf18, primals_18, primals_19, buf19, 144, XBLOCK=256, num_warps=4, num_stages=1) buf20 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_4[grid(144)](primals_18, primals_19, buf19, buf20, 144, XBLOCK=256, num_warps=4, num_stages=1) del buf19 del primals_18 buf21 = buf14 del buf14 triton_poi_fused_convolution_0[grid(4, 4)](primals_17, buf21, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf22 = extern_kernels.convolution(buf20, buf21, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 4, 3, 3), (36, 9, 3, 1)) del buf21 buf23 = reinterpret_tensor(buf18, (1, 4, 3, 3), (36, 9, 3, 1), 0) del buf18 triton_poi_fused_mul_ones_1[grid(36)](primals_21, buf23, 36, XBLOCK =64, num_warps=1, num_stages=1) del primals_21 buf24 = extern_kernels.convolution(buf23, buf20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (1, 4, 1, 1), (4, 1, 1, 1)) buf25 = buf1 del buf1 triton_poi_fused_add_5[grid(144)](buf25, primals_1, buf8, buf15, buf22, 144, XBLOCK=256, num_warps=4, num_stages=1) del buf15 del buf22 del buf8 del primals_1 buf26 = reinterpret_tensor(buf10, (4,), (1,), 0) del buf10 triton_poi_fused_add_6[grid(4)](buf26, primals_2, buf3, primals_6, primals_10, buf17, primals_15, buf24, primals_20, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf17 del buf24 del buf3 del primals_10 del primals_15 del primals_2 del primals_20 del primals_6 buf27 = extern_kernels.convolution(primals_22, buf25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_7[grid(256)](buf28, buf26, primals_23, buf29, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf26 return (buf29, primals_4, primals_9, primals_14, primals_19, primals_22, primals_23, reinterpret_tensor(primals_3, (4, 4, 1, 1), (1, 4, 1, 1 ), 0), buf2, buf6, reinterpret_tensor(primals_7, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf9, buf13, reinterpret_tensor(primals_12, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf16, buf20, reinterpret_tensor(primals_17, (4, 4, 1, 1), (1, 4, 1, 1), 0), buf23, buf25, buf28) class SeqConv3x3(nn.Module): def 
__init__(self, seq_type, inp_planes, out_planes, depth_multiplier): super(SeqConv3x3, self).__init__() self.type = seq_type self.inp_planes = inp_planes self.out_planes = out_planes if self.type == 'conv1x1-conv3x3': self.mid_planes = int(out_planes * depth_multiplier) conv0 = torch.nn.Conv2d(self.inp_planes, self.mid_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias conv1 = torch.nn.Conv2d(self.mid_planes, self.out_planes, kernel_size=3) self.k1 = conv1.weight self.b1 = conv1.bias elif self.type == 'conv1x1-sobelx': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(scale) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(bias) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 0] = 1.0 self.mask[i, 0, 1, 0] = 2.0 self.mask[i, 0, 2, 0] = 1.0 self.mask[i, 0, 0, 2] = -1.0 self.mask[i, 0, 1, 2] = -2.0 self.mask[i, 0, 2, 2] = -1.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) elif self.type == 'conv1x1-sobely': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(torch.FloatTensor(scale)) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(torch.FloatTensor(bias)) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 0] = 1.0 self.mask[i, 0, 0, 1] = 2.0 self.mask[i, 0, 0, 2] = 1.0 self.mask[i, 0, 2, 0] = -1.0 self.mask[i, 0, 2, 1] = -2.0 self.mask[i, 0, 2, 2] = -1.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) elif self.type == 'conv1x1-laplacian': conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=1, padding=0) self.k0 = conv0.weight self.b0 = conv0.bias scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001 self.scale = nn.Parameter(torch.FloatTensor(scale)) bias = torch.randn(self.out_planes) * 0.001 bias = torch.reshape(bias, (self.out_planes,)) self.bias = nn.Parameter(torch.FloatTensor(bias)) self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch .float32) for i in range(self.out_planes): self.mask[i, 0, 0, 1] = 1.0 self.mask[i, 0, 1, 0] = 1.0 self.mask[i, 0, 1, 2] = 1.0 self.mask[i, 0, 2, 1] = 1.0 self.mask[i, 0, 1, 1] = -4.0 self.mask = nn.Parameter(data=self.mask, requires_grad=False) else: raise ValueError('the type of seqconv is not supported!') def forward(self, x): if self.type == 'conv1x1-conv3x3': y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) b0_pad = self.b0.view(1, -1, 1, 1) y0[:, :, 0:1, :] = b0_pad y0[:, :, -1:, :] = b0_pad y0[:, :, :, 0:1] = b0_pad y0[:, :, :, -1:] = b0_pad y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1) else: y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1) y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0) b0_pad = self.b0.view(1, -1, 1, 1) y0[:, :, 0:1, :] = b0_pad y0[:, :, -1:, :] = b0_pad y0[:, :, :, 0:1] = b0_pad y0[:, :, :, -1:] = b0_pad y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias= self.bias, stride=1, groups=self.out_planes) return y1 def rep_params(self): device = 
self.k0.get_device() if device < 0: device = None if self.type == 'conv1x1-conv3x3': RK = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3)) RB = torch.ones(1, self.mid_planes, 3, 3, device=device ) * self.b0.view(1, -1, 1, 1) RB = F.conv2d(input=RB, weight=self.k1).view(-1) + self.b1 else: tmp = self.scale * self.mask k1 = torch.zeros((self.out_planes, self.out_planes, 3, 3), device=device) for i in range(self.out_planes): k1[i, i, :, :] = tmp[i, 0, :, :] b1 = self.bias RK = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3)) RB = torch.ones(1, self.out_planes, 3, 3, device=device ) * self.b0.view(1, -1, 1, 1) RB = F.conv2d(input=RB, weight=k1).view(-1) + b1 return RK, RB class ECBNew(nn.Module): def __init__(self, inp_planes, out_planes, depth_multiplier, act_type= 'prelu', with_idt=False): super(ECBNew, self).__init__() self.depth_multiplier = depth_multiplier self.inp_planes = inp_planes self.out_planes = out_planes self.act_type = act_type if with_idt and self.inp_planes == self.out_planes: self.with_idt = True else: self.with_idt = False self.conv3x3 = torch.nn.Conv2d(self.inp_planes, self.out_planes, kernel_size=3, padding=1) self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.inp_planes, self.out_planes, self.depth_multiplier) self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.inp_planes, self.out_planes, -1) self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.inp_planes, self.out_planes, -1) self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.inp_planes, self.out_planes, -1) if self.act_type == 'prelu': self.act = nn.PReLU(num_parameters=self.out_planes) elif self.act_type == 'relu': self.act = nn.ReLU(inplace=True) elif self.act_type == 'rrelu': self.act = nn.RReLU(lower=-0.05, upper=0.05) elif self.act_type == 'softplus': self.act = nn.Softplus() elif self.act_type == 'linear': pass else: raise ValueError('The type of activation is not supported!') def rep_params(self): K0, B0 = self.conv3x3.weight, self.conv3x3.bias K1, B1 = self.conv1x1_3x3.rep_params() K2, B2 = self.conv1x1_sbx.rep_params() K3, B3 = self.conv1x1_sby.rep_params() K4, B4 = self.conv1x1_lpl.rep_params() RK, RB = K0 + K1 + K2 + K3 + K4, B0 + B1 + B2 + B3 + B4 if self.with_idt: device = RK.get_device() if device < 0: device = None K_idt = torch.zeros(self.out_planes, self.out_planes, 3, 3, device=device) for i in range(self.out_planes): K_idt[i, i, 1, 1] = 1.0 B_idt = 0.0 RK, RB = RK + K_idt, RB + B_idt return RK, RB def forward(self, input_0): primals_1 = self.conv3x3.weight primals_2 = self.conv3x3.bias primals_3 = self.conv1x1_3x3.k0 primals_5 = self.conv1x1_3x3.b0 primals_4 = self.conv1x1_3x3.k1 primals_6 = self.conv1x1_3x3.b1 primals_7 = self.conv1x1_sbx.k0 primals_10 = self.conv1x1_sbx.b0 primals_8 = self.conv1x1_sbx.scale primals_11 = self.conv1x1_sbx.bias primals_9 = self.conv1x1_sbx.mask primals_12 = self.conv1x1_sby.k0 primals_15 = self.conv1x1_sby.b0 primals_13 = self.conv1x1_sby.scale primals_16 = self.conv1x1_sby.bias primals_14 = self.conv1x1_sby.mask primals_17 = self.conv1x1_lpl.k0 primals_20 = self.conv1x1_lpl.b0 primals_18 = self.conv1x1_lpl.scale primals_21 = self.conv1x1_lpl.bias primals_19 = self.conv1x1_lpl.mask primals_23 = self.act.weight primals_22 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0]
thinkreed/ECBSR
ECB
false
16624
[ "Apache-2.0" ]
162
152b9fef9b9fb61b6e5a93677229143652ef305d
https://github.com/thinkreed/ECBSR/tree/152b9fef9b9fb61b6e5a93677229143652ef305d
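Note on the ECB entry above: the generated call() folds the plain 3x3 convolution and the four SeqConv3x3 branches into a single re-parameterized 3x3 kernel, convolves the input once, and applies PReLU, which is the same collapse that rep_params() performs in eager mode. A minimal CPU sketch of that equivalence, assuming the ECBNew and SeqConv3x3 classes above are importable; the names ecb and x are illustrative only, and the comparison is on pre-activation outputs:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
ecb = ECBNew(inp_planes=4, out_planes=4, depth_multiplier=2.0)
x = torch.rand(1, 4, 8, 8)

with torch.no_grad():
    # Training-time view: sum of the five parallel branches (no activation yet).
    y_branches = (ecb.conv3x3(x) + ecb.conv1x1_3x3(x) + ecb.conv1x1_sbx(x) +
                  ecb.conv1x1_sby(x) + ecb.conv1x1_lpl(x))
    # Inference-time view: one collapsed 3x3 convolution from rep_params().
    RK, RB = ecb.rep_params()
    y_rep = F.conv2d(x, RK, RB, stride=1, padding=1)

print(torch.allclose(y_branches, y_rep, atol=1e-05))  # expected: True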
XTanhLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bf/cbfpzmtt3thtqbgcg6idcccnjwleeio3zodkjlrehbsb3a4uvfqs.py # Topologically Sorted Source Nodes: [ey_t, tanh, mul, mean], Original ATen: [aten.sub, aten.tanh, aten.mul, aten.mean] # Source node to ATen node mapping: # ey_t => sub # mean => mean # mul => mul # tanh => tanh # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%sub,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_mean_mul_sub_tanh_0 = async_compile.triton('triton_per_fused_mean_mul_sub_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sub_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_sub_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [ey_t, tanh, mul, mean], Original ATen: [aten.sub, aten.tanh, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_sub_tanh_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class XTanhLoss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y_t, y_prime_t): ey_t = y_t - y_prime_t return torch.mean(ey_t * torch.tanh(ey_t)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_sub_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_sub_tanh_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class XTanhLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
tuantle/regression-losses-pytorch
XTanhLoss
false
16625
[ "MIT" ]
82
2893f4439ada5df239e3afd0ec7e781dd61403e9
https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9
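Note on the XTanhLoss entry above: the persistent-reduction kernel computes mean(e * tanh(e)) with e = y_t - y_prime_t in a single pass over all 256 elements. As a regression loss, x*tanh(x) is approximately x**2 near zero (since tanh(x) ~ x there) and approximately |x| for large residuals, i.e. a smooth L2-to-L1 interpolation. An illustrative CPU check using the eager class above; the constant tensors are made up for this note:

import torch

loss = XTanhLoss()
zeros = torch.zeros(4, 4, 4, 4)
small = torch.full((4, 4, 4, 4), 0.01)
large = torch.full((4, 4, 4, 4), 10.0)

print(loss(small, zeros))  # ~1e-4: behaves like e**2 for small residuals
print(loss(large, zeros))  # ~10.0: behaves like |e| for large residuals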
BondEnergyModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ln/cln5sfyboedlzxyul2rcnb6vanfbljaw6d6z6s36rtu4mggrkfeo.py # Topologically Sorted Source Nodes: [getitem_1, getitem_3, sub, pow_1, sum_1], Original ATen: [aten.index, aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # getitem_1 => index # getitem_3 => index_1 # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%select]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%select_1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index, %index_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) triton_poi_fused_index_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_index_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (4*tmp4), xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert(((0 <= tmp10) & (tmp10 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 4") tmp12 = tl.load(in_ptr1 + (4*tmp10), xmask, eviction_policy='evict_last') tmp13 = tmp6 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.load(in_ptr1 + (1 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tl.load(in_ptr1 + (2 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (2 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp19 + tmp23 tmp25 = tl.load(in_ptr1 + (3 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (3 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp27 = tmp25 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp24 + tmp28 tl.store(out_ptr0 + (x0), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z3/cz33u7nt7d53a53hdopgw4okpvtip6iipriyj3cyx4udjpihvi75.py # Topologically Sorted Source Nodes: [out, sub_1, pow_2, ebond, scatter_add_], Original ATen: [aten.new_full, aten.sub, aten.pow, aten.mul, aten.scatter_add] # Source node to ATen node mapping: # ebond => mul # out => full_default # pow_2 => pow_2 # scatter_add_ => scatter_add # sub_1 => sub_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %arg2_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, %pow_2), kwargs = {}) # %scatter_add : [num_users=1] = call_function[target=torch.ops.aten.scatter_add.default](args = (%full_default, 0, %expand, %mul), kwargs = {}) triton_poi_fused_mul_new_full_pow_scatter_add_sub_1 = async_compile.triton('triton_poi_fused_mul_new_full_pow_scatter_add_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_new_full_pow_scatter_add_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_new_full_pow_scatter_add_sub_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sn/csnw2vyjodfo46s6oav2oggcoxfhq32xoh4ie4dmjmxgofhpqrz3.py # Topologically Sorted Source Nodes: [out, sub_1, pow_2, ebond, scatter_add_, out_1, scatter_add__1], Original ATen: [aten.new_full, aten.sub, aten.pow, aten.mul, aten.scatter_add] # Source node to ATen node mapping: # ebond => mul # out => full_default # out_1 => full_default_1 # pow_2 => pow_2 # scatter_add_ => scatter_add # scatter_add__1 => scatter_add_1 # sub_1 => sub_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %arg2_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, %pow_2), kwargs = {}) # %scatter_add : [num_users=1] = call_function[target=torch.ops.aten.scatter_add.default](args = (%full_default, 0, %expand, %mul), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %scatter_add_1 : [num_users=1] = call_function[target=torch.ops.aten.scatter_add.default](args = (%full_default_1, 0, %expand_1, %mul), kwargs = {}) triton_poi_fused_mul_new_full_pow_scatter_add_sub_2 = async_compile.triton('triton_poi_fused_mul_new_full_pow_scatter_add_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_new_full_pow_scatter_add_sub_2', 'mutated_arg_names': ['out_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_new_full_pow_scatter_add_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x2), xmask) tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tl.device_assert(((0 <= tmp0) & (tmp0 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp0 < 4") tmp4 = tmp3.to(tl.float32) tmp5 = libdevice.sqrt(tmp4) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp2 * tmp8 tl.device_assert(((0 <= tmp10) & (tmp10 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 4") tl.atomic_add(out_ptr0 + (x0 + (4*tmp0)), tmp9, xmask, sem='relaxed') tl.atomic_add(out_ptr1 + (x0 + (4*tmp10)), tmp9, xmask, sem='relaxed') ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ok/cokgncuf6f7tmgioic3xkrdvnzmffcs5db2uut3ibrcrbhfwkee5.py # Topologically Sorted Source Nodes: [energy, mul_2, energy_1], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # energy => mul_1 # energy_1 => add # mul_2 => mul_2 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_add, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_add_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [getitem_1, getitem_3, sub, pow_1, sum_1], Original ATen: [aten.index, aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_pow_sub_sum_0.run(arg0_1, arg1_1, buf0, 4, grid=grid(4), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out, sub_1, pow_2, ebond, scatter_add_], Original ATen: [aten.new_full, aten.sub, aten.pow, aten.mul, aten.scatter_add] triton_poi_fused_mul_new_full_pow_scatter_add_sub_1.run(buf1, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, pow_2, ebond, out_1, scatter_add__1], Original ATen: [aten.sub, aten.pow, aten.mul, aten.new_full, aten.scatter_add] triton_poi_fused_mul_new_full_pow_scatter_add_sub_1.run(buf3, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out, sub_1, pow_2, ebond, scatter_add_, out_1, scatter_add__1], Original ATen: [aten.new_full, aten.sub, aten.pow, aten.mul, aten.scatter_add] triton_poi_fused_mul_new_full_pow_scatter_add_sub_2.run(arg0_1, arg3_1, buf0, arg2_1, buf1, buf3, 16, grid=grid(16), stream=stream0) del arg0_1 del arg2_1 del arg3_1 del buf0 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy, mul_2, energy_1], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(buf1, buf3, buf5, 16, grid=grid(16), stream=stream0) del buf1 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import 
compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn import torch.nn as nn from itertools import repeat def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0): dim = range(src.dim())[dim] if index.dim() == 1: index_size = list(repeat(1, src.dim())) index_size[dim] = src.size(dim) index = index.view(index_size).expand_as(src) if out is None: dim_size = index.max().item() + 1 if dim_size is None else dim_size out_size = list(src.size()) out_size[dim] = dim_size out = src.new_full(out_size, fill_value) return src, out, index, dim def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0): src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value) return out.scatter_add_(dim, index, src) class BondEnergyModule(nn.Module): def __init__(self, batch=True): super().__init__() def forward(self, xyz, bond_adj, bond_len, bond_par): e = (xyz[bond_adj[:, 0]] - xyz[bond_adj[:, 1]]).pow(2).sum(1).sqrt()[ :, None] ebond = bond_par * (e - bond_len) ** 2 energy = 0.5 * scatter_add(src=ebond, index=bond_adj[:, 0], dim=0, dim_size=xyz.shape[0]) energy += 0.5 * scatter_add(src=ebond, index=bond_adj[:, 1], dim=0, dim_size=xyz.shape[0]) return energy def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype =torch.int64), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn import torch.nn as nn from itertools import repeat assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + 4 * tmp4, xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + 4 * tmp10, xmask, eviction_policy='evict_last') tmp13 = tmp6 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.load(in_ptr1 + (1 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tl.load(in_ptr1 + (2 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr1 + (2 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp19 + tmp23 tmp25 = tl.load(in_ptr1 + (3 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (3 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp27 = tmp25 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp24 + tmp28 tl.store(out_ptr0 + x0, tmp29, xmask) @triton.jit def triton_poi_fused_mul_new_full_pow_scatter_add_sub_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mul_new_full_pow_scatter_add_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x2, xmask) tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tl.device_assert((0 <= tmp0) & (tmp0 < 4) | ~xmask, 'index out of bounds: 0 <= tmp0 < 4') tmp4 = tmp3.to(tl.float32) tmp5 = libdevice.sqrt(tmp4) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp2 * tmp8 tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tl.atomic_add(out_ptr0 + (x0 + 4 * tmp0), tmp9, xmask, sem='relaxed') tl.atomic_add(out_ptr1 + (x0 + 4 * tmp10), tmp9, xmask, sem='relaxed') @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_index_pow_sub_sum_0[grid(4)](arg0_1, arg1_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_new_full_pow_scatter_add_sub_1[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_new_full_pow_scatter_add_sub_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) triton_poi_fused_mul_new_full_pow_scatter_add_sub_2[grid(16)](arg0_1, arg3_1, buf0, arg2_1, buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg2_1 del arg3_1 del buf0 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16)](buf1, buf3, buf5, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf1 del buf3 return buf5, def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0): dim = range(src.dim())[dim] if index.dim() == 1: index_size = list(repeat(1, src.dim())) index_size[dim] = src.size(dim) index = index.view(index_size).expand_as(src) if out is None: dim_size = index.max().item() + 1 if dim_size is None else dim_size out_size = list(src.size()) out_size[dim] = dim_size out = src.new_full(out_size, fill_value) return src, out, index, dim def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0): src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value) return out.scatter_add_(dim, index, src) class BondEnergyModuleNew(nn.Module): def __init__(self, batch=True): super().__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
torchmd/mdgrad
BondEnergyModule
false
16626
[ "MIT" ]
54
77bd7685b74b41acf54a9483546e1e8cb545eb01
https://github.com/torchmd/mdgrad/tree/77bd7685b74b41acf54a9483546e1e8cb545eb01
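Note on the BondEnergyModule entry above: each bond contributes a harmonic energy k * (d - d0)**2, and the two scatter_add calls (realized as the two tl.atomic_add stores in the fused kernel) credit half of that energy to each endpoint atom. A small eager-mode usage sketch; the three-atom geometry and constants below are invented for illustration:

import torch

mod = BondEnergyModule()
# Three collinear atoms; the second bond is stretched 0.5 beyond its rest length.
xyz = torch.tensor([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [2.5, 0.0, 0.0]])
bond_adj = torch.tensor([[0, 1],
                         [1, 2]])            # bonded atom-index pairs
bond_len = torch.tensor([[1.0], [1.0]])      # rest lengths d0
bond_par = torch.tensor([[100.0], [100.0]])  # force constants k

energy = mod(xyz, bond_adj, bond_len, bond_par)
print(energy.squeeze())  # tensor([ 0.0, 12.5, 12.5]): half of k*(d-d0)**2 per endpoint
print(energy.sum())      # tensor(25.) == 100 * 0.5**2 for the stretched bond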
ConvMeanPool
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xh/cxhp46orx2gsfuhjng7sfgbploywh2r52vjo7ajxglqlsqm7tvny.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # input_2 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%convolution, [2, 2], [2, 2], [0, 0], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (9*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (9*x0)), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (3 + (9*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + (9*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0) return (buf1, primals_1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def l2normalize(v, esp=1e-08): return v / (v.norm() + esp) def sn_weight(weight, u, height, n_power_iterations): weight.requires_grad_(False) for _ in range(n_power_iterations): v = l2normalize(torch.mv(weight.view(height, -1).t(), u)) u = l2normalize(torch.mv(weight.view(height, -1), v)) weight.requires_grad_(True) sigma = u.dot(weight.view(height, -1).mv(v)) return torch.div(weight, sigma), u def get_nonlinearity(nonlinearity=None): if not nonlinearity: pass elif callable(nonlinearity): if nonlinearity == nn.LeakyReLU: nonlinearity = nonlinearity(0.02, inplace=True) elif hasattr(nn, nonlinearity): nonlinearity = getattr(nn, nonlinearity) if nonlinearity == 'LeakyReLU': nonlinearity = nonlinearity(0.02, inplace=True) else: nonlinearity = nonlinearity() elif hasattr(nn.functional, nonlinearity): nonlinearity = getattr(nn.functional, nonlinearity) else: raise ValueError(nonlinearity) return nonlinearity class SNConv2d(nn.Conv2d): def __init__(self, *args, n_power_iterations=1, **kwargs): super(SNConv2d, self).__init__(*args, **kwargs) self.n_power_iterations = n_power_iterations self.height = self.weight.shape[0] self.register_buffer('u', l2normalize(self.weight.new_empty(self. height).normal_(0, 1))) def forward(self, input): w_sn, self.u = sn_weight(self.weight, self.u, self.height, self. n_power_iterations) return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups) class ConvMeanPool(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix= '', spectral_norm=False): super(ConvMeanPool, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = 'cmp' + prefix models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias= False)) models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad =False)) if nonlinearity: models.add_module('{}_{}'.format(name, nonlinearity.__class__. __name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
        buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf1, primals_1, primals_2, buf0


def l2normalize(v, esp=1e-08):
    return v / (v.norm() + esp)


def sn_weight(weight, u, height, n_power_iterations):
    weight.requires_grad_(False)
    for _ in range(n_power_iterations):
        v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
        u = l2normalize(torch.mv(weight.view(height, -1), v))
    weight.requires_grad_(True)
    sigma = u.dot(weight.view(height, -1).mv(v))
    return torch.div(weight, sigma), u


def get_nonlinearity(nonlinearity=None):
    if not nonlinearity:
        pass
    elif callable(nonlinearity):
        if nonlinearity == nn.LeakyReLU:
            nonlinearity = nonlinearity(0.02, inplace=True)
    elif hasattr(nn, nonlinearity):
        nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity == 'LeakyReLU':
            nonlinearity = nonlinearity(0.02, inplace=True)
        else:
            nonlinearity = nonlinearity()
    elif hasattr(nn.functional, nonlinearity):
        nonlinearity = getattr(nn.functional, nonlinearity)
    else:
        raise ValueError(nonlinearity)
    return nonlinearity


class SNConv2d(nn.Conv2d):

    def __init__(self, *args, n_power_iterations=1, **kwargs):
        super(SNConv2d, self).__init__(*args, **kwargs)
        self.n_power_iterations = n_power_iterations
        self.height = self.weight.shape[0]
        self.register_buffer('u', l2normalize(self.weight.new_empty(self.height).normal_(0, 1)))

    def forward(self, input):
        w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.n_power_iterations)
        return F.conv2d(input, w_sn, self.bias, self.stride, self.padding, self.dilation, self.groups)


class ConvMeanPoolNew(nn.Module):

    def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix='', spectral_norm=False):
        super(ConvMeanPoolNew, self).__init__()
        Conv2d = SNConv2d if spectral_norm else nn.Conv2d
        models = nn.Sequential()
        nonlinearity = get_nonlinearity(nonlinearity)
        name = 'cmp' + prefix
        models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=False))
        models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad=False))
        if nonlinearity:
            models.add_module('{}_{}'.format(name, nonlinearity.__class__.__name__), nonlinearity)
        self.models = models

    def forward(self, input_0):
        primals_1 = self.models.cmp.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
tsirif/cortex
ConvMeanPool
false
16,627
[ "BSD-3-Clause" ]
109
2837b220f9fb73279df3815bb18b274106412c08
https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08
XSigmoidLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/lu/clugiqev6ishcwjvdmckbnrpr46lqz46s6elgmmzftibvavy5434.py
# Topologically Sorted Source Nodes: [ey_t, mul, sigmoid, mul_1, sub_1, mean], Original ATen: [aten.sub, aten.mul, aten.sigmoid, aten.mean]
# Source node to ATen node mapping:
#   ey_t => sub
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   sigmoid => sigmoid
#   sub_1 => sub_1
# Graph fragment:
#   %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {})
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sigmoid), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
triton_per_fused_mean_mul_sigmoid_sub_0 = async_compile.triton('triton_per_fused_mean_mul_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = tmp0 - tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp2)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6 - tmp2
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [ey_t, mul, sigmoid, mul_1, sub_1, mean], Original ATen: [aten.sub, aten.mul, aten.sigmoid, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_mean_mul_sigmoid_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch


class XSigmoidLoss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, y_t, y_prime_t):
        ey_t = y_t - y_prime_t
        return torch.mean(2 * ey_t * torch.sigmoid(ey_t) - ey_t)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
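For reference, the loss above admits a standard rewriting: since 2*sigmoid(e) - 1 == tanh(e/2), the expression 2*e*sigmoid(e) - e equals e*tanh(e/2). A short check (added for illustration; not part of the recorded source):

import torch

loss_fn = XSigmoidLoss()
y = torch.rand([4, 4, 4, 4])
y_hat = torch.rand([4, 4, 4, 4])
e = y - y_hat
# mean(2*e*sigmoid(e) - e) == mean(e * tanh(e/2)) up to floating-point error
assert torch.allclose(loss_fn(y, y_hat), torch.mean(e * torch.tanh(e / 2)), atol=1e-06)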
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_mul_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp2)
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6 - tmp2
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mul_sigmoid_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class XSigmoidLossNew(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
tuantle/regression-losses-pytorch
XSigmoidLoss
false
16,628
[ "MIT" ]
82
2893f4439ada5df239e3afd0ec7e781dd61403e9
https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9
BiAttention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/wm/cwm4p6i4onfgzirdknbkkkkalyi643y6ynzifzggmplzobgfpkd4.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   output => clone_2
# Graph fragment:
#   %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = (xindex // 64)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   output => clone_3
# Graph fragment:
#   %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/gb/cgbvhbwg6zjbgpseylzouy4kgopkhyzopan7swcnpk23a2o45zr5.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   output_1 => clone_4
# Graph fragment:
#   %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x3 = (xindex // 64)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/oz/cozxojbwhgofvogqiimshn6ijmzvg2zn4rlyuttbab5raacnhzht.py
# Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   output_2 => add_2
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %unsqueeze), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = (xindex // 4) % 4
    x2 = (xindex // 16) % 4
    x3 = (xindex // 64)
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
    tmp1 = tl.load(in_ptr0 + (x2 + (4*x1) + (16*x3)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(in_out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(primals_1, buf2, 256, grid=grid(256), stream=stream0)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
        triton_poi_fused_clone_1.run(primals_5, buf3, 256, grid=grid(256), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
        buf5 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
        triton_poi_fused_clone_2.run(primals_2, buf5, 256, grid=grid(256), stream=stream0)
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
        del buf4
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6  # reuse
        # Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add]
        triton_poi_fused_add_3.run(buf7, buf0, buf1, primals_6, 256, grid=grid(256), stream=stream0)
        del buf0
        del buf1
        del primals_6
    return (buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Optional
import torch.nn as nn
from torch.nn.parameter import Parameter


class BiAttention(nn.Module):

    def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int', num_labels: 'int', biaffine: 'bool'=True, **kwargs) -> None:
        super(BiAttention, self).__init__()
        self.input_size_encoder = input_size_encoder
        self.input_size_decoder = input_size_decoder
        self.num_labels = num_labels
        self.biaffine = biaffine
        self.W_e = Parameter(torch.Tensor(self.num_labels, self.input_size_encoder))
        self.W_d = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder))
        self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
        if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder, self.input_size_encoder))
        else:
            self.register_parameter('U', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.W_e)
        nn.init.xavier_uniform_(self.W_d)
        nn.init.constant_(self.b, 0.0)
        if self.biaffine:
            nn.init.xavier_uniform_(self.U)

    def forward(self, input_d: 'torch.Tensor', input_e: 'torch.Tensor', mask_d: 'Optional[torch.Tensor]'=None, mask_e: 'Optional[torch.Tensor]'=None) -> torch.Tensor:
        assert input_d.size(0) == input_e.size(0)
        _batch, _length_decoder, _ = input_d.size()
        _, _length_encoder, _ = input_e.size()
        out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
        if self.biaffine:
            output = torch.matmul(input_d.unsqueeze(1), self.U)
            output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
            output = output + out_d + out_e + self.b
        else:
            output = out_d + out_d + self.b
        if mask_d is not None:
            output = output * mask_d.unsqueeze(1).unsqueeze(3) * mask_e.unsqueeze(1).unsqueeze(2)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size_encoder': 4, 'input_size_decoder': 4, 'num_labels': 4}]
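A minimal usage sketch (added for illustration; not part of the recorded source). Shapes follow get_inputs()/get_init_inputs() above, and both optional masks are left as None:

import torch

attn = BiAttention(input_size_encoder=4, input_size_decoder=4, num_labels=4)
input_d = torch.rand([4, 4, 4])  # (batch, length_decoder, input_size_decoder)
input_e = torch.rand([4, 4, 4])  # (batch, length_encoder, input_size_encoder)
scores = attn(input_d, input_e)  # (batch, num_labels, length_decoder, length_encoder)
assert scores.shape == (4, 4, 4, 4)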
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(in_out_ptr0 + x4, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
        buf5 = buf3
        del buf3
        triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
        del buf4
        buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf6
        triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_6
    return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)


class BiAttentionNew(nn.Module):

    def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int', num_labels: 'int', biaffine: 'bool'=True, **kwargs) -> None:
        super(BiAttentionNew, self).__init__()
        self.input_size_encoder = input_size_encoder
        self.input_size_decoder = input_size_decoder
        self.num_labels = num_labels
        self.biaffine = biaffine
        self.W_e = Parameter(torch.Tensor(self.num_labels, self.input_size_encoder))
        self.W_d = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder))
        self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
        if self.biaffine:
            self.U = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder, self.input_size_encoder))
        else:
            self.register_parameter('U', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.xavier_uniform_(self.W_e)
        nn.init.xavier_uniform_(self.W_d)
        nn.init.constant_(self.b, 0.0)
        if self.biaffine:
            nn.init.xavier_uniform_(self.U)

    def forward(self, input_0, input_1):
        primals_3 = self.W_e
        primals_4 = self.W_d
        primals_6 = self.b
        primals_1 = self.U
        primals_2 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0]
tucan9389/KLUE-baseline
BiAttention
false
16,629
[ "Apache-2.0" ]
71
add61158e61f86adfca65087237443828b650090
https://github.com/tucan9389/KLUE-baseline/tree/add61158e61f86adfca65087237443828b650090
AlgebraicLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/lu/cluouk7gdqrnj4u4lo6ezibsqwwpiuk3qpwvvj37hsvbls533bqv.py
# Topologically Sorted Source Nodes: [ey_t, mul, mul_1, add, sqrt, truediv, mean], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.div, aten.mean]
# Source node to ATen node mapping:
#   add => add
#   ey_t => sub
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   sqrt => sqrt
#   truediv => div
# Graph fragment:
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = 1.0
    tmp5 = tmp3 + tmp4
    tmp6 = libdevice.sqrt(tmp5)
    tmp7 = tmp3 / tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [ey_t, mul, mul_1, add, sqrt, truediv, mean], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.div, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_div_mean_mul_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch


class AlgebraicLoss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, y_t, y_prime_t):
        ey_t = y_t - y_prime_t
        return torch.mean(ey_t * ey_t / torch.sqrt(1 + ey_t * ey_t))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
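Illustrative note (not part of the recorded source): e*e / sqrt(1 + e*e) behaves like e**2 near zero and approaches |e| for large errors, so this loss interpolates between L2 and L1 behavior. A quick sanity check:

import torch

loss_fn = AlgebraicLoss()
y = torch.rand([4, 4, 4, 4])
y_hat = torch.rand([4, 4, 4, 4])
loss = loss_fn(y, y_hat)
assert loss.ndim == 0 and loss.item() >= 0.0  # scalar, non-negative by construction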
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = 1.0
    tmp5 = tmp3 + tmp4
    tmp6 = libdevice.sqrt(tmp5)
    tmp7 = tmp3 / tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class AlgebraicLossNew(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
tuantle/regression-losses-pytorch
AlgebraicLoss
false
16,630
[ "MIT" ]
82
2893f4439ada5df239e3afd0ec7e781dd61403e9
https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9
GPT2Postprocessing
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/mz/cmzntnsms6lzyb35yqnfy7vd7osar32jl5popfgqekaoanmhac6c.py
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   hidden_states => add, rsqrt, var_mean
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1.0
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
    tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
#   hidden_states => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, ), (1, ))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.native_layer_norm]
        stream0 = get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.native_layer_norm]
        triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [lm_logits], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
from torch import nn


class GPT2Postprocessing(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, hidden_states):
        hidden_states = self.ln_f(hidden_states)
        lm_logits = self.lm_head(hidden_states)
        return lm_logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, layer_norm_epsilon=1, vocab_size=4)}]
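A minimal usage sketch (added for illustration; not part of the recorded source). types.SimpleNamespace stands in for the _paritybench_helpers._mock_config helper, which serves the same config-bag purpose above:

import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=4, layer_norm_epsilon=1, vocab_size=4)
head = GPT2Postprocessing(config)
hidden_states = torch.rand([4, 4, 4, 4])
lm_logits = head(hidden_states)  # LayerNorm over the last dim, then a bias-free vocab projection
assert lm_logits.shape == (4, 4, 4, 4)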
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1.0
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4


class GPT2PostprocessingNew(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, input_0):
        primals_1 = self.ln_f.weight
        primals_2 = self.ln_f.bias
        primals_4 = self.lm_head.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
tunib-ai/large-scale-lm-tutorials
GPT2Postprocessing
false
16,631
[ "Apache-2.0" ]
128
ca29ff9f945a59abcc3e3f1000c4d83de97973d4
https://github.com/tunib-ai/large-scale-lm-tutorials/tree/ca29ff9f945a59abcc3e3f1000c4d83de97973d4
KLDivergenceLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/ak/cakm5e6lwgvo4ch6lbxjmcmcar3oco47d3xyxuief4vqkwlft225.py
# Topologically Sorted Source Nodes: [sub, alpha, mul, alpha_tilde], Original ATen: [aten.rsub, aten.add, aten.mul]
# Source node to ATen node mapping:
#   alpha => add
#   alpha_tilde => add_1
#   mul => mul
#   sub => sub
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add), kwargs = {})
#   %add_1 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %mul), kwargs = {})
triton_poi_fused_add_mul_rsub_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp3 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 * tmp4
    tmp6 = tmp0 + tmp5
    tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/fs/cfslznycxlykpj42fqvbg2sgr2rr5ofx6xol4bhm6igaw3efhsg5.py
# Topologically Sorted Source Nodes: [strength_tilde], Original ATen: [aten.sum]
# Source node to ATen node mapping:
#   strength_tilde => sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [-1]), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/2i/c2izsdx2sqgkyo4pbm7trfgkofg2h276vcut2cdf4alkpa2abztc.py
# Topologically Sorted Source Nodes: [sub, alpha, mul, alpha_tilde, sum_2, lgamma, lgamma_1, sub_1, lgamma_2, sum_3, first, sub_3, sub_4, mul_1, second, loss, mean], Original ATen: [aten.rsub, aten.add, aten.mul, aten.sum, aten.lgamma, aten.sub, aten.mean]
# Source node to ATen node mapping:
#   alpha => add
#   alpha_tilde => add_1
#   first => sub_2
#   lgamma => lgamma
#   lgamma_1 => full_default
#   lgamma_2 => lgamma_2
#   loss => add_2
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   second => sum_4
#   sub => sub
#   sub_1 => sub_1
#   sub_3 => sub_3
#   sub_4 => sub_4
#   sum_2 => sum_2
#   sum_3 => sum_3
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add), kwargs = {})
#   %add_1 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %mul), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [-1]), kwargs = {})
#   %lgamma : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%sum_2,), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.7917594909667969), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%lgamma, %full_default), kwargs = {})
#   %lgamma_2 : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%add_1,), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%lgamma_2, [-1]), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %sum_3), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, 1), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%digamma, %unsqueeze), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %sum_4), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {})
triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2 = async_compile.triton('triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None,
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 4 r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + ((4*r1) + (16*r3)), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (1 + (4*r1) + (16*r3)), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (2 + (4*r1) + (16*r3)), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (3 + (4*r1) + (16*r3)), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 + tmp1 tmp5 = tmp2 * tmp4 tmp6 = tmp0 + tmp5 tmp8 = tmp1 - tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 * tmp10 tmp12 = tmp7 + tmp11 tmp13 = tmp6 + tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 + tmp1 tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp13 + tmp19 tmp22 = tmp1 - tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp22 * tmp24 tmp26 = tmp21 + tmp25 tmp27 = tmp20 + tmp26 tmp28 = libdevice.lgamma(tmp27) tmp29 = 1.7917594909667969 tmp30 = tmp28 - tmp29 tmp31 = libdevice.lgamma(tmp6) tmp32 = libdevice.lgamma(tmp12) tmp33 = tmp31 + tmp32 tmp34 = libdevice.lgamma(tmp19) tmp35 = tmp33 + tmp34 tmp36 = libdevice.lgamma(tmp26) tmp37 = tmp35 + tmp36 tmp38 = tmp6 - tmp1 tmp41 = tmp39 - tmp40 tmp42 = tmp38 * tmp41 tmp43 = tmp12 - tmp1 tmp46 = tmp44 - tmp45 tmp47 = tmp43 * tmp46 tmp48 = tmp42 + tmp47 tmp49 = tmp19 - tmp1 tmp52 = tmp50 - tmp51 tmp53 = tmp49 * tmp52 tmp54 = tmp48 + tmp53 tmp55 = tmp26 - tmp1 tmp58 = tmp56 - tmp57 tmp59 = tmp55 * tmp58 tmp60 = tmp54 + tmp59 tmp61 = tmp30 - tmp37 tmp62 = tmp61 + tmp60 tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK]) tmp65 = tl.sum(tmp63, 1)[:, None] tmp66 = 64.0 tmp67 = tmp65 / tmp66 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp67, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically 
Sorted Source Nodes: [sub, alpha, mul, alpha_tilde], Original ATen: [aten.rsub, aten.add, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_0.run(arg1_1, arg0_1, buf2, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [sub, alpha, mul, alpha_tilde, digamma], Original ATen: [aten.rsub, aten.add, aten.mul, aten.digamma] buf3 = torch.ops.aten.digamma.default(buf2) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [strength_tilde], Original ATen: [aten.sum] triton_poi_fused_sum_1.run(buf2, buf5, 64, grid=grid(64), stream=stream0) del buf2 # Topologically Sorted Source Nodes: [strength_tilde, digamma_1], Original ATen: [aten.sum, aten.digamma] buf6 = torch.ops.aten.digamma.default(buf5) del buf5 buf7 = buf6 del buf6 buf9 = empty_strided_cuda((), (), torch.float32) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [sub, alpha, mul, alpha_tilde, sum_2, lgamma, lgamma_1, sub_1, lgamma_2, sum_3, first, sub_3, sub_4, mul_1, second, loss, mean], Original ATen: [aten.rsub, aten.add, aten.mul, aten.sum, aten.lgamma, aten.sub, aten.mean] triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2.run(buf10, arg1_1, arg0_1, buf4, buf7, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf4 del buf7 return (buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.utils.data import torch.nn.functional import torch.autograd class KLDivergenceLoss(Module): """ <a id="KLDivergenceLoss"></a> ## KL Divergence Regularization Loss This tries to shrink the total evidence to zero if the sample cannot be correctly classified. First we calculate $\\tilde{\\alpha}_k = y_k + (1 - y_k) \\textcolor{orange}{\\alpha_k}$ the Dirichlet parameters after removing the correct evidence. \\begin{align} &KL \\Big[ D(\\mathbf{p} \\vert \\mathbf{\\tilde{\\alpha}}) \\Big \\Vert D(\\mathbf{p} \\vert <1, \\dots, 1>)\\Big] \\\\ &= \\log \\Bigg( \\frac{\\Gamma \\Big( \\sum_{k=1}^K \\tilde{\\alpha}_k \\Big)} {\\Gamma(K) \\prod_{k=1}^K \\Gamma(\\tilde{\\alpha}_k)} \\Bigg) + \\sum_{k=1}^K (\\tilde{\\alpha}_k - 1) \\Big[ \\psi(\\tilde{\\alpha}_k) - \\psi(\\tilde{S}) \\Big] \\end{align} where $\\Gamma(\\cdot)$ is the gamma function, $\\psi(\\cdot)$ is the $digamma$ function and $\\tilde{S} = \\sum_{k=1}^K \\tilde{\\alpha}_k$ """ def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'): """ * `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]` * `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]` """ alpha = evidence + 1.0 n_classes = evidence.shape[-1] alpha_tilde = target + (1 - target) * alpha strength_tilde = alpha_tilde.sum(dim=-1) first = torch.lgamma(alpha_tilde.sum(dim=-1)) - torch.lgamma( alpha_tilde.new_tensor(float(n_classes))) - torch.lgamma( alpha_tilde).sum(dim=-1) second = ((alpha_tilde - 1) * (torch.digamma(alpha_tilde) - torch. digamma(strength_tilde)[:, None])).sum(dim=-1) loss = first + second return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
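The formula above has an easy analytic check: with zero evidence, every alpha_tilde entry is 1, so the KL term vanishes. Below is a minimal sanity-check sketch (not part of the dataset record) that assumes the KLDivergenceLoss class above is in scope:

import torch

loss_fn = KLDivergenceLoss()
target = torch.eye(4)                  # one-hot targets, n_classes = 4
zero_evidence = torch.zeros(4, 4)
# alpha_tilde = y + (1 - y) * 1 = 1 everywhere, so KL[D(p|1) || D(p|1)] = 0
print(loss_fn(zero_evidence, target))  # tensor(0.)
# Evidence assigned to the wrong classes is penalized, as intended
wrong_evidence = (1 - target) * 5.0
print(loss_fn(wrong_evidence, target) > 0)  # tensor(True)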
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 + tmp1 tmp5 = tmp2 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 4 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr3 + (4 * r1 + 16 * r3), None, eviction_policy= 'evict_last') tmp44 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (1 + 4 * r1 + 16 * r3), None, eviction_policy ='evict_last') tmp50 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (2 + 4 * r1 + 16 * r3), None, eviction_policy ='evict_last') tmp56 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr3 + (3 + 4 * r1 + 16 * r3), None, eviction_policy ='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 + tmp1 tmp5 = tmp2 * tmp4 tmp6 = tmp0 + tmp5 tmp8 = tmp1 - tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 * tmp10 tmp12 = tmp7 + tmp11 tmp13 = tmp6 + tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 + tmp1 tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp13 + tmp19 tmp22 = tmp1 - tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp22 * tmp24 tmp26 = tmp21 + tmp25 
tmp27 = tmp20 + tmp26 tmp28 = libdevice.lgamma(tmp27) tmp29 = 1.7917594909667969 tmp30 = tmp28 - tmp29 tmp31 = libdevice.lgamma(tmp6) tmp32 = libdevice.lgamma(tmp12) tmp33 = tmp31 + tmp32 tmp34 = libdevice.lgamma(tmp19) tmp35 = tmp33 + tmp34 tmp36 = libdevice.lgamma(tmp26) tmp37 = tmp35 + tmp36 tmp38 = tmp6 - tmp1 tmp41 = tmp39 - tmp40 tmp42 = tmp38 * tmp41 tmp43 = tmp12 - tmp1 tmp46 = tmp44 - tmp45 tmp47 = tmp43 * tmp46 tmp48 = tmp42 + tmp47 tmp49 = tmp19 - tmp1 tmp52 = tmp50 - tmp51 tmp53 = tmp49 * tmp52 tmp54 = tmp48 + tmp53 tmp55 = tmp26 - tmp1 tmp58 = tmp56 - tmp57 tmp59 = tmp55 * tmp58 tmp60 = tmp54 + tmp59 tmp61 = tmp30 - tmp37 tmp62 = tmp61 + tmp60 tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK]) tmp65 = tl.sum(tmp63, 1)[:, None] tmp66 = 64.0 tmp67 = tmp65 / tmp66 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp67, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_0[grid(256)](arg1_1, arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = torch.ops.aten.digamma.default(buf2) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_sum_1[grid(64)](buf2, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf6 = torch.ops.aten.digamma.default(buf5) del buf5 buf7 = buf6 del buf6 buf9 = empty_strided_cuda((), (), torch.float32) buf10 = buf9 del buf9 triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2[grid(1)](buf10, arg1_1, arg0_1, buf4, buf7, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del buf4 del buf7 return buf10, class KLDivergenceLossNew(Module): """ <a id="KLDivergenceLoss"></a> ## KL Divergence Regularization Loss This tries to shrink the total evidence to zero if the sample cannot be correctly classified. First we calculate $\\tilde{\\alpha}_k = y_k + (1 - y_k) \\textcolor{orange}{\\alpha_k}$ the Dirichlet parameters after removing the correct evidence. \\begin{align} &KL \\Big[ D(\\mathbf{p} \\vert \\mathbf{\\tilde{\\alpha}}) \\Big \\Vert D(\\mathbf{p} \\vert <1, \\dots, 1>)\\Big] \\\\ &= \\log \\Bigg( \\frac{\\Gamma \\Big( \\sum_{k=1}^K \\tilde{\\alpha}_k \\Big)} {\\Gamma(K) \\prod_{k=1}^K \\Gamma(\\tilde{\\alpha}_k)} \\Bigg) + \\sum_{k=1}^K (\\tilde{\\alpha}_k - 1) \\Big[ \\psi(\\tilde{\\alpha}_k) - \\psi(\\tilde{S}) \\Big] \\end{align} where $\\Gamma(\\cdot)$ is the gamma function, $\\psi(\\cdot)$ is the $digamma$ function and $\\tilde{S} = \\sum_{k=1}^K \\tilde{\\alpha}_k$ """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
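As a quick parity check (a sketch, not part of the record): the Triton-backed module should match the eager module up to float rounding on the 4x4x4x4 inputs its kernels were specialized for. This assumes a CUDA device with Triton available and both classes defined as above:

import torch

if torch.cuda.is_available():
    evidence = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    eager = KLDivergenceLoss()(evidence, target)
    fused = KLDivergenceLossNew()(evidence, target)
    # call() asserts the exact (4, 4, 4, 4) shape and contiguous strides,
    # so other input shapes fail its guards rather than miscompute.
    assert torch.allclose(eager, fused, atol=1e-5)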
techthiyanes/annotated_deep_learning_paper_implementations
KLDivergenceLoss
false
16,632
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
Conv2dZeroInit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/b5/cb5h536lfxehnv2ezobudfl5wugu2y6mu444yw7yei4n22rp33zu.py # Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp] # Source node to ATen node mapping: # exp => exp # mul => mul # mul_1 => mul_1 # out => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3.0), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {}) triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn import torch.nn class Conv2dZeroInit(nn.Conv2d): def __init__(self, channels_in, channels_out, filter_size, stride=1, padding=0, logscale=3.0): super().__init__(channels_in, channels_out, filter_size, stride= stride, padding=padding) self.register_parameter('logs', nn.Parameter(torch.zeros( channels_out, 1, 1))) self.logscale_factor = logscale def reset_parameters(self): self.weight.data.zero_() self.bias.data.zero_() def forward(self, input): out = super().forward(input) return out * torch.exp(self.logs * self.logscale_factor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels_in': 4, 'channels_out': 4, 'filter_size': 4}]
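The point of this layer is the identity-at-zero initialization used in flow models: nn.Conv2d.__init__ invokes the overridden reset_parameters(), which zeroes the weights and bias, and logs starts at zero so the learned scale exp(logs * 3.0) is 1. A short sketch of that behavior, assuming the Conv2dZeroInit class above:

import torch

layer = Conv2dZeroInit(channels_in=4, channels_out=4, filter_size=4)
x = torch.rand(4, 4, 4, 4)
out = layer(x)                 # 4x4 kernel on 4x4 input -> 1x1 spatial output
print(out.shape)               # torch.Size([4, 4, 1, 1])
print(out.abs().max().item())  # 0.0 -- the block contributes nothing at init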
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 class Conv2dZeroInitNew(nn.Conv2d): def __init__(self, channels_in, channels_out, filter_size, stride=1, padding=0, logscale=3.0): super().__init__(channels_in, channels_out, filter_size, stride= stride, padding=padding) self.register_parameter('logs', nn.Parameter(torch.zeros( channels_out, 1, 1))) self.logscale_factor = logscale def reset_parameters(self): self.weight.data.zero_() self.bias.data.zero_() def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_4 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
tychovdo/RevGAN
Conv2dZeroInit
false
16,633
[ "BSD-3-Clause" ]
79
2af25e6a8176eaab3d424db45fb6ee2cfc5dc9a3
https://github.com/tychovdo/RevGAN/tree/2af25e6a8176eaab3d424db45fb6ee2cfc5dc9a3
netmodel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6u/c6uqisdoyak6d6tkodc57kultaxhr4l3kboejc6mikjbvg6t5xmh.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp5 = tl.load(in_ptr0 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp16 = tl.load(in_ptr2 + (0)) tmp17 = 
tl.broadcast_to(tmp16, [XBLOCK]) tmp0 = x1 + (4*x0) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = 100.0 tmp8 = tmp6 * tmp7 tmp9 = tl.load(in_ptr1 + (4*(x1 + (4*x0))), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 8, tl.int64) tmp15 = tmp0 < tmp14 tmp18 = 0.1 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr1 + (1 + (4*((-4) + x1 + (4*x0)))), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp13, tmp21, tmp22) tmp24 = tl.where(tmp4, tmp12, tmp23) tl.store(out_ptr0 + (x2), tmp24, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, primals_2, primals_3, buf0, 8, grid=grid(8), stream=stream0) del primals_1 del primals_3 return (buf0, reinterpret_tensor(primals_2, (4, ), (4, ), 0), reinterpret_tensor(primals_2, (4, ), (4, ), 1), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch.nn import Parameter class netmodel(torch.nn.Module): def __init__(self): super(netmodel, self).__init__() self.w0 = Parameter(torch.Tensor(1)) self.w1 = Parameter(torch.Tensor(1)) self.w0.data.uniform_(-1, 1) self.w1.data.uniform_(-1, 1) def forward(self, inputs): y = torch.stack([100 * self.w0 * inputs[:, 0], 0.1 * self.w1 * inputs[:, 1]]) y = torch.t(y) return y.contiguous() def extract_grad(self): tot_size = self.count_parameters() pvec = np.zeros(tot_size, np.float32) count = 0 for param in self.parameters(): sz = param.grad.data.numpy().flatten().shape[0] pvec[count:count + sz] = param.grad.data.numpy().flatten() count += sz return pvec.copy() def extract_parameters(self): tot_size = self.count_parameters() pvec = np.zeros(tot_size, np.float32) count = 0 for param in self.parameters(): sz = param.data.numpy().flatten().shape[0] pvec[count:count + sz] = param.data.numpy().flatten() count += sz return pvec.copy() def inject_parameters(self, pvec): self.count_parameters() count = 0 for param in self.parameters(): sz = param.data.numpy().flatten().shape[0] raw = pvec[count:count + sz] reshaped = raw.reshape(param.data.numpy().shape) param.data = torch.from_numpy(reshaped) count += sz return pvec def count_parameters(self): count = 0 for param in self.parameters(): count += param.data.numpy().flatten().shape[0] return count def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
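The forward pass simply scales the two input columns by 100 * w0 and 0.1 * w1, and the extract/inject helpers round-trip all parameters through one flat numpy vector. A small usage sketch, assuming the netmodel class above and CPU tensors (the helpers call .numpy()):

import torch

model = netmodel()
x = torch.rand(4, 4)
y = model(x)
assert torch.allclose(y[:, 0], 100 * model.w0 * x[:, 0])
assert torch.allclose(y[:, 1], 0.1 * model.w1 * x[:, 1])

flat = model.extract_parameters()  # flat float32 copy of w0 and w1
model.inject_parameters(flat)      # writing the same vector back is a no-op
assert torch.allclose(model(x), y)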
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp16 = tl.load(in_ptr2 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp0 = x1 + 4 * x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = 100.0 tmp8 = tmp6 * tmp7 tmp9 = tl.load(in_ptr1 + 4 * (x1 + 4 * x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp18 = 0.1 tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr1 + (1 + 4 * (-4 + x1 + 4 * x0)), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp13, tmp21, tmp22) tmp24 = tl.where(tmp4, tmp12, tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2), (2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(8)](primals_1, primals_2, primals_3, buf0, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_1 del primals_3 return buf0, reinterpret_tensor(primals_2, (4,), (4,), 0 ), reinterpret_tensor(primals_2, (4,), (4,), 1) class netmodelNew(torch.nn.Module): def __init__(self): super(netmodelNew, self).__init__() self.w0 = Parameter(torch.Tensor(1)) self.w1 = Parameter(torch.Tensor(1)) self.w0.data.uniform_(-1, 1) self.w1.data.uniform_(-1, 1) def extract_grad(self): tot_size = self.count_parameters() pvec = np.zeros(tot_size, np.float32) count = 0 for param in self.parameters(): sz = param.grad.data.numpy().flatten().shape[0] pvec[count:count + sz] = param.grad.data.numpy().flatten() count += sz return pvec.copy() def extract_parameters(self): tot_size = self.count_parameters() pvec = np.zeros(tot_size, np.float32) count = 0 for param in self.parameters(): sz = param.data.numpy().flatten().shape[0] pvec[count:count + sz] = param.data.numpy().flatten() count += sz return pvec.copy() def inject_parameters(self, pvec): self.count_parameters() count = 0 for param in self.parameters(): sz = param.data.numpy().flatten().shape[0] raw = pvec[count:count + sz] reshaped = raw.reshape(param.data.numpy().shape) param.data = torch.from_numpy(reshaped) count += sz return pvec def count_parameters(self): count = 0 for param in self.parameters(): count += param.data.numpy().flatten().shape[0] return count def forward(self, input_0): primals_1 = self.w0 primals_3 = self.w1 primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
uber-common/safemutations
netmodel
false
16,634
[ "MIT" ]
91
40e5fd03a244f89bf157d4bedf79201e706aedc1
https://github.com/uber-common/safemutations/tree/40e5fd03a244f89bf157d4bedf79201e706aedc1
DSC_loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jf/cjfduj4aoafs2zdnhlqgoziqbas75hcrxx3dup25kofd3dncr3xu.py # Topologically Sorted Source Nodes: [mul, sum_1, add_1, sum_2], Original ATen: [aten.mul, aten.sum, aten.add] # Source node to ATen node mapping: # add_1 => add_1 # mul => mul # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {}) triton_per_fused_add_mul_sum_0 = async_compile.triton('triton_per_fused_add_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 + tmp1 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/av/caveveath64djs35ok3exidhg627rkehcajxu6txrjypmfepefwp.py # Topologically Sorted Source Nodes: [mul_1, add, add_2, DSC, sum_3, truediv_1, sub], Original ATen: [aten.mul, aten.add, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # DSC => div # add => add # add_2 => add_2 # mul_1 => mul_1 # sub => sub # sum_3 => sum_3 # truediv_1 => div_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-06), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp3 tmp7 = tmp4 / tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 0.25 tmp12 = tmp10 * tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1, add_1, sum_2], Original ATen: [aten.mul, aten.sum, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul_1, add, add_2, DSC, sum_3, truediv_1, sub], Original ATen: [aten.mul, aten.add, aten.div, aten.sum, aten.rsub] triton_per_fused_add_div_mul_rsub_sum_1.run(buf3, buf0, buf1, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DSC_loss(nn.Module): def __init__(self): super(DSC_loss, self).__init__() self.epsilon = 1e-06 return def forward(self, pred, target): batch_num = pred.shape[0] pred = pred.contiguous().view(batch_num, -1) target = target.contiguous().view(batch_num, -1) DSC = (2 * (pred * target).sum(1) + self.epsilon) / ((pred + target ).sum(1) + self.epsilon) return 1 - DSC.sum() / float(batch_num) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
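Two boundary cases make the Dice loss easy to sanity-check: identical 0/1 masks give DSC = 1 per sample (loss 0), and disjoint masks give DSC near 0 (loss near 1). A minimal sketch, assuming the DSC_loss class above:

import torch

loss_fn = DSC_loss()
mask = (torch.rand(4, 1, 8, 8) > 0.5).float()
# Perfect overlap: for a 0/1 mask, 2*sum(p*p) equals sum(p + p), so DSC = 1
print(loss_fn(mask, mask))      # tensor(0.)
# No overlap: the intersection term is zero, leaving only epsilon
print(loss_fn(mask, 1 - mask))  # ~tensor(1.)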
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 + tmp1 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp3 tmp7 = tmp4 / tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 0.25 tmp12 = tmp10 * tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, class DSC_lossNew(nn.Module): def __init__(self): super(DSC_lossNew, self).__init__() self.epsilon = 1e-06 return def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
twni2016/OrganSegRSTN_PyTorch
DSC_loss
false
16,635
[ "MIT" ]
100
bf571320e718c8f138e04d48645e3b4dfe75801d
https://github.com/twni2016/OrganSegRSTN_PyTorch/tree/bf571320e718c8f138e04d48645e3b4dfe75801d
ConformerConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bs/cbstxeghddltznr7shuzsnth6ngv6mnftr2w7pqzzm5flm72plbl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xd/cxdkgesb6qc4hbcvbeerj4gwfolzo6uujsc7nr7gmmdljjywjmd6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2f/c2fbrnmwutoz3lg6i3ej5xgc7434bcb7tt67cl3pv4byjybrp5cg.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.glu] # Source node to ATen node mapping: # x_2 => glu # Graph fragment: # %glu : [num_users=2] = call_function[target=torch.ops.aten.glu.default](args = (%convolution, 1), kwargs = {}) triton_poi_fused_glu_2 = async_compile.triton('triton_poi_fused_glu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (32*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (32*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/he/chegxomcbtt6hrgtktw3p5v3t6dte24xroou3x43tjnss6fcsnd3.py # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_3 => convolution_1 # x_4 => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%glu, %primals_4, %primals_5, [1], [0], [1], False, [0], 4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/py/cpyevk5ui6utpplpcxxmaefjaw6flnsq34pe5bc3kggnsp3mdm5q.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, 
grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4), (32, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 128, grid=grid(128), stream=stream0) del primals_3 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.glu] triton_poi_fused_glu_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf5, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4), (16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf7, primals_7, 64, grid=grid(64), stream=stream0) del primals_7 return (reinterpret_tensor(buf7, (4, 4, 4), (1, 16, 4), 0), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), buf2, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.cuda


class ConformerConvBlock(nn.Module):

    def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
        super(ConformerConvBlock, self).__init__()
        assert (kernel_size - 1) % 2 == 0
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
            kernel_size=1, stride=1, padding=0, bias=bias)
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, groups=channels,
            bias=bias)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
            stride=1, padding=0, bias=bias)
        self.activation = activation
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_normal_(self.pointwise_conv1.weight,
            nonlinearity='relu')
        nn.init.kaiming_normal_(self.depthwise_conv.weight,
            nonlinearity='relu')
        nn.init.kaiming_normal_(self.pointwise_conv2.weight,
            nonlinearity='relu')
        nn.init.constant_(self.pointwise_conv1.bias, 0)
        nn.init.constant_(self.pointwise_conv2.bias, 0)
        nn.init.constant_(self.depthwise_conv.bias, 0)

    def forward(self, x):
        """
        :param x: [seq_len x bsz x hidden_size]
        :return:
        """
        x = x.transpose(0, 1).transpose(1, 2)
        x = self.pointwise_conv1(x)
        x = F.glu(x, dim=1)
        x = self.depthwise_conv(x)
        x = self.activation(x)
        x = self.pointwise_conv2(x)
        x = x.transpose(1, 2).transpose(0, 1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'kernel_size': 1}]
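# --- Editor's usage sketch (illustration only; not part of the dataset record).
# The block maps [seq_len, batch, hidden] -> [seq_len, batch, hidden]; the two
# transposes move the hidden dimension into the Conv1d channel position. Only
# the ConformerConvBlock class defined above is assumed.
def _demo_conformer_conv_block():
    import torch
    block = ConformerConvBlock(channels=4, kernel_size=1)
    x = torch.rand(4, 4, 4)  # [seq_len, batch, hidden], matching get_inputs()
    y = block(x)
    assert y.shape == x.shape  # shape is preserved end to end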
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (8,), (1,))
    assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf3 = buf0
        del buf0
        triton_poi_fused_glu_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=4, bias=None)
        assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_3[grid(64)](buf5, primals_5, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_4[grid(64)](buf7, primals_7, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_7
    return (reinterpret_tensor(buf7, (4, 4, 4), (1, 16, 4), 0), primals_2,
        primals_4, primals_6, reinterpret_tensor(primals_1, (4, 4, 4),
        (4, 1, 16), 0), buf2, buf3, buf5)


class ConformerConvBlockNew(nn.Module):

    def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
        super(ConformerConvBlockNew, self).__init__()
        assert (kernel_size - 1) % 2 == 0
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
            kernel_size=1, stride=1, padding=0, bias=bias)
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, groups=channels,
            bias=bias)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
            stride=1, padding=0, bias=bias)
        self.activation = activation
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_normal_(self.pointwise_conv1.weight,
            nonlinearity='relu')
        nn.init.kaiming_normal_(self.depthwise_conv.weight,
            nonlinearity='relu')
        nn.init.kaiming_normal_(self.pointwise_conv2.weight,
            nonlinearity='relu')
        nn.init.constant_(self.pointwise_conv1.bias, 0)
        nn.init.constant_(self.pointwise_conv2.bias, 0)
        nn.init.constant_(self.depthwise_conv.bias, 0)

    def forward(self, input_0):
        primals_2 = self.pointwise_conv1.weight
        primals_3 = self.pointwise_conv1.bias
        primals_4 = self.depthwise_conv.weight
        primals_5 = self.depthwise_conv.bias
        primals_6 = self.pointwise_conv2.weight
        primals_7 = self.pointwise_conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
tuannamnguyen93/NMTGMinor
ConformerConvBlock
false
16636
[ "MIT" ]
75
acde3454343bda7060fae541c110d0ad1a8ac4f4
https://github.com/tuannamnguyen93/NMTGMinor/tree/acde3454343bda7060fae541c110d0ad1a8ac4f4
N2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qu/cqus46ccg6zxy23dy53q5ruz2v7a6tr32ti6ki3ofym6a7x3xwoa.py # Topologically Sorted Source Nodes: [norm, pow_1, sum_1, mul, norm_1, norm_2, pow_2, sum_2, mul_1, norm_3, norm_4, pow_3, sum_3, mul_2, norm_5, norm_6, pow_4, sum_4, mul_3, norm_7, truediv], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.sum, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # norm => pow_1, pow_2, sum_1 # norm_1 => add # norm_2 => pow_4, pow_5, sum_3 # norm_3 => add_1 # norm_4 => pow_7, pow_8, sum_5 # norm_5 => add_2 # norm_6 => pow_10, pow_11, sum_7 # norm_7 => add_3 # pow_1 => pow_3 # pow_2 => pow_6 # pow_3 => pow_9 # pow_4 => pow_12 # sum_1 => sum_2 # sum_2 => sum_4 # sum_3 => sum_6 # sum_4 => sum_8 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 3), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [1]), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 3), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_6,), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_2, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1]), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {}) # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_8, 3), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_9,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {}) # %pow_10 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_10, [1]), kwargs = {}) # %pow_11 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_7, 0.5), kwargs = {}) # %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_11, 3), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_12,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 4), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), None) tmp2 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), None) tmp5 = tl.load(in_ptr0 + (8 + r0 + (16*r1)), None) tmp8 = tl.load(in_ptr0 + (12 + r0 + (16*r1)), None) tmp17 = tl.load(in_ptr0 + (64 + r0 + (16*r1)), None) tmp19 = tl.load(in_ptr0 + (68 + r0 + (16*r1)), None) tmp22 = tl.load(in_ptr0 + (72 + r0 + (16*r1)), None) tmp25 = tl.load(in_ptr0 + (76 + r0 + (16*r1)), None) tmp34 = tl.load(in_ptr0 + (128 + r0 + (16*r1)), None) tmp36 = tl.load(in_ptr0 + (132 + r0 + (16*r1)), None) tmp39 = tl.load(in_ptr0 + (136 + r0 + (16*r1)), None) tmp42 = tl.load(in_ptr0 + (140 + r0 + (16*r1)), None) tmp51 = tl.load(in_ptr0 + (192 + r0 + (16*r1)), None) tmp53 = tl.load(in_ptr0 + (196 + r0 + (16*r1)), None) tmp56 = tl.load(in_ptr0 + (200 + r0 + (16*r1)), None) tmp59 = tl.load(in_ptr0 + (204 + r0 + (16*r1)), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = tmp11 * tmp11 tmp13 = tmp12 * tmp11 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = tmp28 * tmp28 tmp30 = tmp29 * tmp28 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp35 = tmp34 * tmp34 tmp37 = tmp36 * tmp36 tmp38 = tmp35 + tmp37 tmp40 = tmp39 * tmp39 tmp41 = tmp38 + tmp40 tmp43 = tmp42 * tmp42 tmp44 = tmp41 + tmp43 tmp45 = libdevice.sqrt(tmp44) tmp46 = tmp45 * tmp45 tmp47 = tmp46 * tmp45 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.sum(tmp48, 1)[:, None] tmp52 = tmp51 * tmp51 tmp54 = tmp53 * tmp53 tmp55 = tmp52 + tmp54 tmp57 = tmp56 * tmp56 tmp58 = tmp55 + tmp57 tmp60 = tmp59 * tmp59 tmp61 = tmp58 + tmp60 tmp62 = libdevice.sqrt(tmp61) tmp63 = tmp62 * tmp62 tmp64 = tmp63 * tmp62 tmp65 = tl.broadcast_to(tmp64, [XBLOCK, RBLOCK]) tmp67 = tl.sum(tmp65, 1)[:, None] tmp68 = 4.0 tmp69 = tmp16 * tmp68 tmp70 = 0.0 tmp71 = tmp69 + tmp70 tmp72 = tmp33 * tmp68 tmp73 = tmp71 + tmp72 tmp74 = tmp50 * tmp68 tmp75 = tmp73 + tmp74 tmp76 = tmp67 * tmp68 tmp77 = tmp75 + tmp76 tmp78 = 0.25 tmp79 = tmp77 * tmp78 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp79, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [norm, pow_1, sum_1, mul, norm_1, norm_2, pow_2, sum_2, mul_1, norm_3, norm_4, pow_3, sum_3, mul_2, norm_5, norm_6, pow_4, sum_4, mul_3, norm_7, truediv], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.sum, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0.run(buf4, arg0_1, 1, 16, grid=grid(1), 
stream=stream0) del arg0_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Tuple
from abc import ABC
from abc import abstractmethod
from torch import nn


class Regularizer(nn.Module, ABC):

    @abstractmethod
    def forward(self, factors: 'Tuple[torch.Tensor]'):
        pass


class N2(Regularizer):

    def __init__(self, weight: 'float'):
        super(N2, self).__init__()
        self.weight = weight

    def forward(self, factors):
        norm = 0
        for f in factors:
            norm += self.weight * torch.sum(torch.norm(f, 2, 1) ** 3)
        return norm / factors[0].shape[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'weight': 4}]
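# --- Editor's usage sketch (illustration only; not part of the dataset record).
# N2 adds weight * sum(||f||_2^3) per factor (L2 norm taken over dim 1) and
# divides by the leading dimension of the first factor; the check below
# recomputes the cubed norms by hand.
def _demo_n2():
    import torch
    reg = N2(weight=4.0)
    factors = torch.rand(4, 4, 4, 4)  # iterating yields four [4, 4, 4] factors
    loss = reg(factors)
    expected = sum(4.0 * ((f ** 2).sum(dim=1).sqrt() ** 3).sum()
                   for f in factors) / factors[0].shape[0]
    assert torch.allclose(loss, expected)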
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from typing import Tuple
from abc import ABC
from abc import abstractmethod
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0(in_out_ptr0,
    in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 4
    r1 = rindex // 4
    tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
    tmp2 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
    tmp5 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
    tmp8 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
    tmp17 = tl.load(in_ptr0 + (64 + r0 + 16 * r1), None)
    tmp19 = tl.load(in_ptr0 + (68 + r0 + 16 * r1), None)
    tmp22 = tl.load(in_ptr0 + (72 + r0 + 16 * r1), None)
    tmp25 = tl.load(in_ptr0 + (76 + r0 + 16 * r1), None)
    tmp34 = tl.load(in_ptr0 + (128 + r0 + 16 * r1), None)
    tmp36 = tl.load(in_ptr0 + (132 + r0 + 16 * r1), None)
    tmp39 = tl.load(in_ptr0 + (136 + r0 + 16 * r1), None)
    tmp42 = tl.load(in_ptr0 + (140 + r0 + 16 * r1), None)
    tmp51 = tl.load(in_ptr0 + (192 + r0 + 16 * r1), None)
    tmp53 = tl.load(in_ptr0 + (196 + r0 + 16 * r1), None)
    tmp56 = tl.load(in_ptr0 + (200 + r0 + 16 * r1), None)
    tmp59 = tl.load(in_ptr0 + (204 + r0 + 16 * r1), None)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tmp12 = tmp11 * tmp11
    tmp13 = tmp12 * tmp11
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp16 = tl.sum(tmp14, 1)[:, None]
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = tmp28 * tmp28
    tmp30 = tmp29 * tmp28
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.sum(tmp31, 1)[:, None]
    tmp35 = tmp34 * tmp34
    tmp37 = tmp36 * tmp36
    tmp38 = tmp35 + tmp37
    tmp40 = tmp39 * tmp39
    tmp41 = tmp38 + tmp40
    tmp43 = tmp42 * tmp42
    tmp44 = tmp41 + tmp43
    tmp45 = libdevice.sqrt(tmp44)
    tmp46 = tmp45 * tmp45
    tmp47 = tmp46 * tmp45
    tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
    tmp50 = tl.sum(tmp48, 1)[:, None]
    tmp52 = tmp51 * tmp51
    tmp54 = tmp53 * tmp53
    tmp55 = tmp52 + tmp54
    tmp57 = tmp56 * tmp56
    tmp58 = tmp55 + tmp57
    tmp60 = tmp59 * tmp59
    tmp61 = tmp58 + tmp60
    tmp62 = libdevice.sqrt(tmp61)
    tmp63 = tmp62 * tmp62
    tmp64 = tmp63 * tmp62
    tmp65 = tl.broadcast_to(tmp64, [XBLOCK, RBLOCK])
    tmp67 = tl.sum(tmp65, 1)[:, None]
    tmp68 = 4.0
    tmp69 = tmp16 * tmp68
    tmp70 = 0.0
    tmp71 = tmp69 + tmp70
    tmp72 = tmp33 * tmp68
    tmp73 = tmp71 + tmp72
    tmp74 = tmp50 * tmp68
    tmp75 = tmp73 + tmp74
    tmp76 = tmp67 * tmp68
    tmp77 = tmp75 + tmp76
    tmp78 = 0.25
    tmp79 = tmp77 * tmp78
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp79, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0[grid(1)](
            buf4, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf4,


class Regularizer(nn.Module, ABC):

    @abstractmethod
    def forward(self, factors: 'Tuple[torch.Tensor]'):
        pass


class N2New(Regularizer):

    def __init__(self, weight: 'float'):
        super(N2New, self).__init__()
        self.weight = weight

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
uclnlp/cqd
N2
false
16637
[ "MIT" ]
59
36148c110f336415250c98873fc27ca847741a78
https://github.com/uclnlp/cqd/tree/36148c110f336415250c98873fc27ca847741a78
L2LossWithLogit
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/im/cimi6y3pvgmp5ek4mtvhvumx2ntdf6v6r2jvajes5xh5km7wyxnh.py # Topologically Sorted Source Nodes: [p, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss] # Source node to ATen node mapping: # mse_loss => pow_1, sub, sum_1 # p => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mse_loss_sigmoid_0 = async_compile.triton('triton_per_fused_mse_loss_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [p, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_sigmoid_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class L2LossWithLogit(nn.Module):

    def __init__(self):
        super(L2LossWithLogit, self).__init__()
        self.mse = nn.MSELoss(reduction='sum')

    def forward(self, logits, targets):
        p = torch.sigmoid(logits)
        return self.mse(p, targets)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
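# --- Editor's usage sketch (illustration only; not part of the dataset record).
# The loss is a sum-reduced MSE between sigmoid(logits) and targets, which is
# exactly what the fused Triton kernel below computes in a single pass.
def _demo_l2_loss_with_logit():
    import torch
    criterion = L2LossWithLogit()
    logits = torch.randn(4, 4, 4, 4)
    targets = torch.rand(4, 4, 4, 4)
    loss = criterion(logits, targets)
    manual = ((torch.sigmoid(logits) - targets) ** 2).sum()
    assert torch.allclose(loss, manual)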
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mse_loss_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 - tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mse_loss_sigmoid_0[grid(1)](arg0_1, arg1_1, buf0,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class L2LossWithLogitNew(nn.Module):

    def __init__(self):
        super(L2LossWithLogitNew, self).__init__()
        self.mse = nn.MSELoss(reduction='sum')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
ucas-vg/TinyBenchmark
L2LossWithLogit
false
16638
[ "MIT" ]
495
36436df3716d842b6148fb6f6bc7715a2fbdfd92
https://github.com/ucas-vg/TinyBenchmark/tree/36436df3716d842b6148fb6f6bc7715a2fbdfd92
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tw/ctwsjugie2x4qimo4y2aun3bc6okw62iaakyiqsdnvdysdrgh2wb.py # Topologically Sorted Source Nodes: [sub, mul, add, truediv, add_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (0)) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = 4.0 tmp11 = tmp9 / tmp10 tmp12 = tmp2 - tmp11 tmp13 = tmp1 * tmp12 tmp14 = tmp3 - tmp11 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp11 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp11 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp8 - tmp11 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = 3.0 tmp26 = tmp24 / tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = 1e-06 tmp29 = tmp27 + tmp28 tmp30 = tmp13 / tmp29 tmp33 = tmp30 + tmp32 tl.store(out_ptr0 + (x2), tmp33, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul, add, truediv, add_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class LayerNorm(nn.Module):

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(1))
        self.beta = nn.Parameter(torch.zeros(1))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1).expand_as(x)
        std = x.std(-1).expand_as(x)
        return self.gamma.expand_as(x) * (x - mean) / (std + self.eps
            ) + self.beta.expand_as(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
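# --- Editor's usage sketch (illustration only; not part of the dataset record).
# This variant normalizes with a single scalar gamma/beta, uses the
# Bessel-corrected std (x.std), and broadcasts the reduced statistics via
# expand_as, so it is not a drop-in replacement for nn.LayerNorm.
def _demo_layer_norm():
    import torch
    ln = LayerNorm(features=4)
    x = torch.rand(4, 4, 4, 4)
    y = ln(x)
    assert y.shape == x.shape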
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr2 + 0)
    tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp9 = tmp7 + tmp8
    tmp10 = 4.0
    tmp11 = tmp9 / tmp10
    tmp12 = tmp2 - tmp11
    tmp13 = tmp1 * tmp12
    tmp14 = tmp3 - tmp11
    tmp15 = tmp14 * tmp14
    tmp16 = tmp4 - tmp11
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp11
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp8 - tmp11
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = 3.0
    tmp26 = tmp24 / tmp25
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = 1e-06
    tmp29 = tmp27 + tmp28
    tmp30 = tmp13 / tmp29
    tmp33 = tmp30 + tmp32
    tl.store(out_ptr0 + x2, tmp33, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class LayerNormNew(nn.Module):

    def __init__(self, features, eps=1e-06):
        super(LayerNormNew, self).__init__()
        self.gamma = nn.Parameter(torch.ones(1))
        self.beta = nn.Parameter(torch.zeros(1))
        self.eps = eps

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
uber-common/safemutations
LayerNorm
false
16639
[ "MIT" ]
91
40e5fd03a244f89bf157d4bedf79201e706aedc1
https://github.com/uber-common/safemutations/tree/40e5fd03a244f89bf157d4bedf79201e706aedc1
HammingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ib/cibac72z76u35abtssk2kixl4ye6m3d6p4dsad6tjd46mmcwsdko.py # Topologically Sorted Source Nodes: [sub, mul, sub_1, mul_1, errors, mean, sum_1], Original ATen: [aten.rsub, aten.mul, aten.add, aten.mean, aten.sum] # Source node to ATen node mapping: # errors => add # mean => mean # mul => mul # mul_1 => mul_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [0]), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mean,), kwargs = {}) triton_per_fused_add_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_mean_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 
'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_ptr0 + (64 + r0), None) tmp9 = tl.load(in_ptr1 + (64 + r0), None) tmp16 = tl.load(in_ptr0 + (128 + r0), None) tmp17 = tl.load(in_ptr1 + (128 + r0), None) tmp24 = tl.load(in_ptr0 + (192 + r0), None) tmp25 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 - tmp0 tmp6 = tmp5 * tmp1 tmp7 = tmp4 + tmp6 tmp10 = tmp2 - tmp9 tmp11 = tmp8 * tmp10 tmp12 = tmp2 - tmp8 tmp13 = tmp12 * tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp7 + tmp14 tmp18 = tmp2 - tmp17 tmp19 = tmp16 * tmp18 tmp20 = tmp2 - tmp16 tmp21 = tmp20 * tmp17 tmp22 = tmp19 + tmp21 tmp23 = tmp15 + tmp22 tmp26 = tmp2 - tmp25 tmp27 = tmp24 * tmp26 tmp28 = tmp2 - tmp24 tmp29 = tmp28 * tmp25 tmp30 = tmp27 + tmp29 tmp31 = tmp23 + tmp30 tmp32 = 4.0 tmp33 = tmp31 / tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp36, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sub, mul, sub_1, mul_1, errors, mean, sum_1], Original ATen: [aten.rsub, aten.mul, aten.add, aten.mean, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_rsub_sum_0.run(arg1_1, arg0_1, buf1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class HammingLoss(torch.nn.Module): def forward(self, suggested, target): errors = suggested * (1.0 - target) + (1.0 - suggested) * target return errors.mean(dim=0).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
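# A minimal eager sanity check for the HammingLoss module above -- an
# illustrative sketch, assuming the class definition is in scope. On hard
# 0/1 labels the relaxation suggested*(1-target) + (1-suggested)*target
# reduces to an elementwise XOR, so the loss equals the per-sample
# disagreement count averaged over the batch dimension and summed.
import torch

suggested = torch.randint(0, 2, (4, 4, 4, 4)).float()
target = torch.randint(0, 2, (4, 4, 4, 4)).float()
loss = HammingLoss()(suggested, target)
xor = (suggested != target).float()
assert torch.allclose(loss, xor.mean(dim=0).sum())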
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_ptr0 + (64 + r0), None) tmp9 = tl.load(in_ptr1 + (64 + r0), None) tmp16 = tl.load(in_ptr0 + (128 + r0), None) tmp17 = tl.load(in_ptr1 + (128 + r0), None) tmp24 = tl.load(in_ptr0 + (192 + r0), None) tmp25 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 - tmp0 tmp6 = tmp5 * tmp1 tmp7 = tmp4 + tmp6 tmp10 = tmp2 - tmp9 tmp11 = tmp8 * tmp10 tmp12 = tmp2 - tmp8 tmp13 = tmp12 * tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp7 + tmp14 tmp18 = tmp2 - tmp17 tmp19 = tmp16 * tmp18 tmp20 = tmp2 - tmp16 tmp21 = tmp20 * tmp17 tmp22 = tmp19 + tmp21 tmp23 = tmp15 + tmp22 tmp26 = tmp2 - tmp25 tmp27 = tmp24 * tmp26 tmp28 = tmp2 - tmp24 tmp29 = tmp28 * tmp25 tmp30 = tmp27 + tmp29 tmp31 = tmp23 + tmp30 tmp32 = 4.0 tmp33 = tmp31 / tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_rsub_sum_0[grid(1)](arg1_1, arg0_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HammingLossNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
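# Hypothetical equivalence check between the eager HammingLoss and the
# Triton-backed HammingLossNew wrapper above, assuming both class
# definitions are importable together. call() allocates its output on
# cuda:0 and asserts contiguous (64, 16, 4, 1) strides, so a CUDA device
# and contiguous inputs are required.
import torch

if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(HammingLoss()(a, b), HammingLossNew()(a, b))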
uclnlp/torch-imle
HammingLoss
false
16,640
[ "MIT" ]
205
f595cd8d527466f6b5db79276f6ceee01d100a1c
https://github.com/uclnlp/torch-imle/tree/f595cd8d527466f6b5db79276f6ceee01d100a1c
FCGenerator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/et/cetb5tm344c4pwjhwlpvykesfm76nlb2b74rthqhhtbpj2txjaml.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class FCGenerator(nn.Module): def __init__(self, options): """ The fully connected generator is initialized by creating a chain of fully connected layers that perform transformations d -> 2 * d -> ... -> 2^(k - 1) * d -> n * n where d = options.state_size k = options.generator_layers n = options.image_size """ super(FCGenerator, self).__init__() self.dropout = options.generator_dropout self.layers = options.generator_layers sizes = [] size = options.state_size for i in range(options.generator_layers): sizes.append(size) size *= 2 sizes.append(options.image_size * options.image_size) for i in range(options.generator_layers): layer = nn.Linear(sizes[i], sizes[i + 1]) self.add_module(f'linear_{i}', layer) def forward(self, x): layers = {} for name, module in self.named_children(): layers[name] = module for i in range(self.layers): layer = layers[f'linear_{i}'] x = layer(x) if i < self.layers - 1: x = F.leaky_relu(x, 0.2) if self.dropout is not None: x = F.dropout(x, self.dropout) return torch.tanh(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'options': _mock_config(generator_dropout=0.5, generator_layers=1, state_size=4, image_size=4)}]
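# _mock_config comes from paritybench's private helpers; any object that
# exposes the four attributes read in __init__ works just as well. A
# hypothetical stand-in using SimpleNamespace:
import torch
from types import SimpleNamespace

options = SimpleNamespace(generator_dropout=0.5, generator_layers=1,
    state_size=4, image_size=4)
gen = FCGenerator(options)
out = gen(torch.rand(4, 4, 4, 4))
# With one layer, the single Linear maps state_size=4 -> image_size**2=16,
# and the tanh keeps the shape.
assert out.shape == (4, 4, 4, 16)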
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(1024)](buf1, primals_2, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class FCGeneratorNew(nn.Module): def __init__(self, options): """ The fully connected generator is initialized by creating a chain of fully connected layers that perform transformations d -> 2 * d -> ... -> 2^(k - 1) * d -> n * n where d = options.state_size k = options.generator_layers n = options.image_size """ super(FCGeneratorNew, self).__init__() self.dropout = options.generator_dropout self.layers = options.generator_layers sizes = [] size = options.state_size for i in range(options.generator_layers): sizes.append(size) size *= 2 sizes.append(options.image_size * options.image_size) for i in range(options.generator_layers): layer = nn.Linear(sizes[i], sizes[i + 1]) self.add_module(f'linear_{i}', layer) def forward(self, input_0): primals_1 = self.linear_0.weight primals_2 = self.linear_0.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
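# Sketch of driving the compiled wrapper directly (an assumption, not part
# of the dataset row): call() asserts CUDA strides on the weight, bias,
# and input, so the module and the input must both live on cuda:0.
import torch
from types import SimpleNamespace

if torch.cuda.is_available():
    options = SimpleNamespace(generator_dropout=0.5, generator_layers=1,
        state_size=4, image_size=4)
    gen = FCGeneratorNew(options).cuda()
    y = gen(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4, 4, 16)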
unicredit/ganzo
FCGenerator
false
16,641
[ "Apache-2.0" ]
73
fb1d270f5091073e8f27da76ab508ab24e5d40e9
https://github.com/unicredit/ganzo/tree/fb1d270f5091073e8f27da76ab508ab24e5d40e9
FusedDownsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ho/cho65iisnaf25ldqwazqthm4dk6kkvugfqryyb5hcwumhgthhuzm.py # Topologically Sorted Source Nodes: [add, add_1, add_2, weight_1], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # weight_1 => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_12), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_16), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {}) triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 5 x0 = xindex % 5 x2 = (xindex // 25) x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tmp12 = 0.1767766952966369 tmp13 = tmp11 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = (-1) + x1 tmp17 = tmp16 >= tmp1 tmp18 = tmp16 < tmp3 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp6 tmp21 = tmp20 & tmp7 tmp22 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 * tmp12 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp21, tmp23, tmp24) tmp26 = tmp15 + tmp25 tmp27 = (-1) + x0 tmp28 = tmp27 >= tmp1 tmp29 = tmp27 < tmp3 tmp30 = tmp8 & tmp28 tmp31 = tmp30 & tmp29 tmp32 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp12 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp26 + tmp35 tmp37 = tmp19 & tmp28 tmp38 = tmp37 & tmp29 tmp39 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp38 & xmask, other=0.0) tmp40 = tmp39 * tmp12 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp36 + tmp42 tmp44 = 0.25 tmp45 = tmp43 * tmp44 tl.store(in_out_ptr0 + (x4), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/v4/cv4atvq6obm5chp2mxnillb7x7egcougkhuekco3b76wpvmy35ng.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 900) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, add_1, add_2, weight_1], Original ATen: [aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_0.run(buf1, primals_1, 400, grid=grid(400), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_2, 14400, grid=grid(14400), stream=stream0) del primals_2 return (buf3, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn from math import sqrt class FusedDownsample(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, padding=0): super().__init__() weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size) bias = torch.zeros(out_channel) fan_in = in_channel * kernel_size * kernel_size self.multiplier = sqrt(2 / fan_in) self.weight = nn.Parameter(weight) self.bias = nn.Parameter(bias) self.pad = padding def forward(self, input): weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1]) weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1]) / 4 out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad) return out def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
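# Shape sketch for the module above, assuming the class is in scope: the
# padded-and-averaged weight grows the kernel from 4x4 to 5x5, so a 64x64
# input convolved at stride 2 with no padding yields floor((64-5)/2)+1 = 30
# on each spatial side -- matching the (4, 4, 30, 30) assertion in the
# compiled wrapper.
import torch

down = FusedDownsample(4, 4, 4)
out = down(torch.rand(4, 4, 64, 64))
assert out.shape == (4, 4, 30, 30)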
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0 ) tmp12 = 0.1767766952966369 tmp13 = tmp11 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = -1 + x1 tmp17 = tmp16 >= tmp1 tmp18 = tmp16 < tmp3 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp6 tmp21 = tmp20 & tmp7 tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask, other=0.0) tmp23 = tmp22 * tmp12 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp21, tmp23, tmp24) tmp26 = tmp15 + tmp25 tmp27 = -1 + x0 tmp28 = tmp27 >= tmp1 tmp29 = tmp27 < tmp3 tmp30 = tmp8 & tmp28 tmp31 = tmp30 & tmp29 tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp12 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp26 + tmp35 tmp37 = tmp19 & tmp28 tmp38 = tmp37 & tmp29 tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask, other=0.0) tmp40 = tmp39 * tmp12 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp36 + tmp42 tmp44 = 0.25 tmp45 = tmp43 * tmp44 tl.store(in_out_ptr0 + x4, tmp45, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(14400)](buf3, primals_2, 14400, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf3, primals_3, buf1 class FusedDownsampleNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, padding=0): 
super().__init__() weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size) bias = torch.zeros(out_channel) fan_in = in_channel * kernel_size * kernel_size self.multiplier = sqrt(2 / fan_in) self.weight = nn.Parameter(weight) self.bias = nn.Parameter(bias) self.pad = padding def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
uzielroy/StyleGan_FewShot
FusedDownsample
false
16,642
[ "MIT" ]
76
94e4c49dbf39d1c6299f33787afb3e471ece11e3
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
L1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/i5/ci5r22vnwphjxav3oibgww4fkm25q4egp3rofzniyjru2u4b563f.py # Topologically Sorted Source Nodes: [l1_loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] # Source node to ATen node mapping: # l1_loss => abs_1, mean, sub # mul => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1), kwargs = {}) triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [l1_loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.onnx class L1Loss(torch.nn.Module): """ L1 loss """ def __init__(self, **kwargs): super(L1Loss, self).__init__() self.loss_w = kwargs.get('loss_weight', 1) def forward(self, preds, gts): return F.l1_loss(preds.view(-1), gts.view(-1)) * self.loss_w def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
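# Quick eager check for the L1Loss wrapper above (a sketch, assuming the
# class is in scope): flattening both tensors before F.l1_loss leaves the
# mean-reduced result unchanged, so the output matches the built-in
# criterion scaled by loss_weight.
import torch
import torch.nn.functional as F

preds, gts = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = L1Loss(loss_weight=2.0)(preds, gts)
assert torch.allclose(loss, 2.0 * F.l1_loss(preds, gts))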
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1LossNew(torch.nn.Module): """ L1 loss """ def __init__(self, **kwargs): super(L1LossNew, self).__init__() self.loss_w = kwargs.get('loss_weight', 1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
usutdzxych/CenseoQoE
L1Loss
false
16,643
[ "BSD-3-Clause" ]
75
3f653296b223da6190e1e1781e7b9b54ff877102
https://github.com/usutdzxych/CenseoQoE/tree/3f653296b223da6190e1e1781e7b9b54ff877102
Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xg/cxg3op264zx2ec3atkmjwmld6htg2wdbd2inqitp272jkpjbeazc.py # Topologically Sorted Source Nodes: [weight_mean, weight], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # weight => sub # weight_mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qr/cqryreojrutxg3jcxvklww4odvjbi37hol2vyqn6ekjgauih7gie.py # Topologically Sorted Source Nodes: [weight_1], Original ATen: [aten.div] # Source node to ATen node mapping: # weight_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp1 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp2 - tmp9 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp4 - tmp9 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp6 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = 3.0 tmp22 = tmp20 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp0 / tmp25 tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), 
(1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_mean, weight], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 del primals_2 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class Linear(nn.Linear): def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True) weight = weight - weight_mean std = weight.std(dim=1, keepdim=True) + 1e-05 weight = weight / std.expand_as(weight) return F.linear(x, weight, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
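# Property sketch for the weight-standardized Linear above, assuming the
# class is in scope: the effective weight used in the matmul has each
# output row centered to zero mean and scaled by its (unbiased) standard
# deviation plus 1e-05, which is exactly what the two fused kernels compute.
import torch

lin = Linear(in_features=4, out_features=4)
w = lin.weight - lin.weight.mean(dim=1, keepdim=True)
w = w / (w.std(dim=1, keepdim=True) + 1e-05)
assert torch.allclose(w.mean(dim=1), torch.zeros(4), atol=1e-06)
y = lin(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)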
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp1 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp2 - tmp9 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp4 - tmp9 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp6 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = 3.0 tmp22 = tmp20 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp0 / tmp25 tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(16)](primals_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 del primals_2 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class LinearNew(nn.Linear): def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
untitled-ai/self_supervised
Linear
false
16,644
[ "MIT" ]
370
6d14ca0402ecc13feda9b3a9fdc056fd1ac24473
https://github.com/untitled-ai/self_supervised/tree/6d14ca0402ecc13feda9b3a9fdc056fd1ac24473
FusedUpsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ho/cho65iisnaf25ldqwazqthm4dk6kkvugfqryyb5hcwumhgthhuzm.py # Topologically Sorted Source Nodes: [add, add_1, add_2, weight_1], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # weight_1 => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_12), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_16), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {}) triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 5) % 5 x0 = xindex % 5 x2 = (xindex // 25) x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tmp12 = 0.1767766952966369 tmp13 = tmp11 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = (-1) + x1 tmp17 = tmp16 >= tmp1 tmp18 = tmp16 < tmp3 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp6 tmp21 = tmp20 & tmp7 tmp22 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 * tmp12 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp21, tmp23, tmp24) tmp26 = tmp15 + tmp25 tmp27 = (-1) + x0 tmp28 = tmp27 >= tmp1 tmp29 = tmp27 < tmp3 tmp30 = tmp8 & tmp28 tmp31 = tmp30 & tmp29 tmp32 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp12 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp26 + tmp35 tmp37 = tmp19 & tmp28 tmp38 = tmp37 & tmp29 tmp39 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp38 & xmask, other=0.0) tmp40 = tmp39 * tmp12 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp36 + tmp42 tmp44 = 0.25 tmp45 = tmp43 * tmp44 tl.store(in_out_ptr0 + (x4), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2q/c2qxl3444r7faal6wdwqwnbo4yy446moujhj4vpwvty2afomxxzq.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 121) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, add_1, add_2, weight_1], Original ATen: [aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_0.run(buf1, primals_1, 400, grid=grid(400), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_2, 1936, grid=grid(1936), stream=stream0) del primals_2 return (buf3, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn
from math import sqrt


class FusedUpsample(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input):
        weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
        weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] +
                  weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
        out = F.conv_transpose2d(input, weight, self.bias, stride=2,
                                 padding=self.pad)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
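The forward pass above folds kernel blurring into a single transposed convolution: the equalized-lr weight is scaled by multiplier = sqrt(2 / fan_in) (0.1767766952966369 for the 4x4x4 kernel here, the same constant that appears as tmp12 in the fused kernel), zero-padded by one pixel, and the four diagonally shifted views are averaged into a (kernel_size + 1)-sized kernel. A minimal sketch of that identity on CPU, assuming the FusedUpsample class above is in scope; the names m and ref are illustrative:

import torch
import torch.nn.functional as F

m = FusedUpsample(in_channel=4, out_channel=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)

# Recompute the averaged kernel exactly as forward() does.
w = F.pad(m.weight * m.multiplier, [1, 1, 1, 1])            # (4, 4, 6, 6)
w = (w[:, :, 1:, 1:] + w[:, :, :-1, 1:]
     + w[:, :, 1:, :-1] + w[:, :, :-1, :-1]) / 4            # (4, 4, 5, 5)
ref = F.conv_transpose2d(x, w, m.bias, stride=2)            # (4, 4, 11, 11)

out = m(x)
assert out.shape == (4, 4, 11, 11)   # (4 - 1) * 2 + 5 = 11 per spatial dim
assert torch.allclose(out, ref)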
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Builds the averaged 5x5 kernel from the 4x4 weight: each output tap is
    # the mean of the four shifted views of the zero-padded, multiplier-scaled
    # weight; 0.1767766952966369 == sqrt(2 / 64), the equalized-lr multiplier.
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x4 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0)
    tmp12 = 0.1767766952966369
    tmp13 = tmp11 * tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp10, tmp13, tmp14)
    tmp16 = -1 + x1
    tmp17 = tmp16 >= tmp1
    tmp18 = tmp16 < tmp3
    tmp19 = tmp17 & tmp18
    tmp20 = tmp19 & tmp6
    tmp21 = tmp20 & tmp7
    tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask, other=0.0)
    tmp23 = tmp22 * tmp12
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp21, tmp23, tmp24)
    tmp26 = tmp15 + tmp25
    tmp27 = -1 + x0
    tmp28 = tmp27 >= tmp1
    tmp29 = tmp27 < tmp3
    tmp30 = tmp8 & tmp28
    tmp31 = tmp30 & tmp29
    tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask, other=0.0)
    tmp33 = tmp32 * tmp12
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp31, tmp33, tmp34)
    tmp36 = tmp26 + tmp35
    tmp37 = tmp19 & tmp28
    tmp38 = tmp37 & tmp29
    tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask, other=0.0)
    tmp40 = tmp39 * tmp12
    tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp36 + tmp42
    tmp44 = 0.25
    tmp45 = tmp43 * tmp44
    tl.store(in_out_ptr0 + x4, tmp45, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Adds the per-channel bias to the (4, 4, 11, 11) transposed-conv output
    # in place; x1 indexes the channel (121 = 11 * 11 spatial positions).
    xnumel = 1936
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 121 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(1936)](buf3, primals_2, 1936,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf3, primals_3, buf1


class FusedUpsampleNew(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, padding=0):
        super().__init__()
        weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
        bias = torch.zeros(out_channel)
        fan_in = in_channel * kernel_size * kernel_size
        self.multiplier = sqrt(2 / fan_in)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.pad = padding

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
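A hedged sanity check that the compiled path matches the eager module, assuming a CUDA device and that both FusedUpsample and FusedUpsampleNew above are in scope; copying the state dict so the two start from the same random weights is illustrative setup, not part of the dataset:

import torch

eager = FusedUpsample(4, 4, 4).cuda()
compiled = FusedUpsampleNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # same random weight and bias

x = torch.rand(4, 4, 4, 4, device='cuda')     # the shape call() asserts
with torch.no_grad():
    y_eager = eager(x)
    y_compiled = compiled(x)

assert y_eager.shape == y_compiled.shape == (4, 4, 11, 11)
assert torch.allclose(y_eager, y_compiled, atol=1e-5)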
uzielroy/StyleGan_FewShot
FusedUpsample
false
16,645
[ "MIT" ]
76
94e4c49dbf39d1c6299f33787afb3e471ece11e3
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
AlbertAttentionWithoutSkipConnection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = 
tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) 
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q6/cq65ro6tojzkasrxq7oijwtyo4qp33fmpaic6mwgwszhlikkuosh.py # Topologically Sorted Source Nodes: [projected_context_layer, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layernormed_context_layer => var_mean # projected_context_layer => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_9), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 + tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + (x0), tmp20, xmask) tl.store(out_ptr1 + (x0), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ak/cakuhvbuo5twl4unqfuk6raawu5rlyk4udc3473bb7zjeofptsbg.py # Topologically Sorted Source Nodes: [projected_context_layer, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layernormed_context_layer => add_1, add_2, mul, mul_1, rsqrt, sub_1 # projected_context_layer => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_9), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_11), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (1, 16, 4), (64, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (1, 16, 4), (0, 4, 1), 0), reinterpret_tensor(primals_8, (1, 4, 4), (0, 1, 4), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [projected_context_layer, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_9, buf12, buf13, 16, grid=grid(16), stream=stream0) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [projected_context_layer, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(buf11, primals_9, buf12, buf13, primals_10, primals_11, buf14, 64, grid=grid(64), stream=stream0) del buf12 del buf13 del primals_11 return (buf14, primals_9, primals_10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), buf11, reinterpret_tensor(buf10, (1, 4, 16), (64, 1, 4), 
0), reinterpret_tensor(primals_8, (1, 4, 4), (4, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
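Of the kernels above, triton_poi_fused_1 and triton_poi_fused_2 together implement a numerically stable masked softmax over the last dimension: the first pass computes exp(x - rowmax), the second normalizes and zeroes any row whose entries are all -inf (the eq/logical_not/any chain). A rough eager-mode equivalent of that pair, assuming a score tensor with size 4 in the last dimension as traced here; the function name is illustrative:

import torch

def masked_softmax_reference(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (triton_poi_fused_1): subtract the row max, then exponentiate.
    exp = torch.exp(scores - scores.amax(dim=-1, keepdim=True))
    # Pass 2 (triton_poi_fused_2): normalize, and replace rows that were
    # entirely -inf (which would otherwise produce NaN) with zeros.
    probs = exp / exp.sum(dim=-1, keepdim=True)
    all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(all_masked, torch.zeros_like(probs), probs)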
from _paritybench_helpers import _mock_config
import math
import torch
import torch.utils.checkpoint
from torch import nn


class AlbertAttentionWithoutSkipConnection(nn.Module):

    def __init__(self, config):
        super().__init__()
        if (config.hidden_size % config.num_attention_heads != 0 and not
                hasattr(config, 'embedding_size')):
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.attention_head_size = (config.hidden_size //
            config.num_attention_heads)
        self.all_head_size = (self.num_attention_heads *
            self.attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size,
            eps=config.layer_norm_eps)
        self.pruned_heads = set()
        self.position_embedding_type = getattr(config,
            'position_embedding_type', 'absolute')
        if (self.position_embedding_type == 'relative_key' or
                self.position_embedding_type == 'relative_key_query'):
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(
                2 * config.max_position_embeddings - 1,
                self.attention_head_size)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask=None, head_mask=None,
            output_attentions=False):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer,
            key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(
            self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        if (self.position_embedding_type == 'relative_key' or
                self.position_embedding_type == 'relative_key_query'):
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long,
                device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long,
                device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(
                distance + self.max_position_embeddings - 1)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr',
                    query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum(
                    'bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum(
                    'bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = (attention_scores +
                    relative_position_scores_query +
                    relative_position_scores_key)
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.attention_dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        w = self.dense.weight.t().view(self.num_attention_heads,
            self.attention_head_size, self.hidden_size)
        b = self.dense.bias
        projected_context_layer = torch.einsum('bfnd,ndh->bfh',
            context_layer, w) + b
        projected_context_layer_dropout = self.output_dropout(
            projected_context_layer)
        layernormed_context_layer = self.LayerNorm(
            projected_context_layer_dropout)
        return (layernormed_context_layer, attention_probs
            ) if output_attentions else (layernormed_context_layer,)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
        layer_norm_eps=1, position_embedding_type=4)}]
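The einsum 'bfnd,ndh->bfh' in forward() is just the dense output projection applied to the per-head context and summed over heads. A small check of that identity, assuming the class above is in scope; the ad-hoc SimpleNamespace config mirrors get_init_inputs and is purely illustrative:

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
    attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
    layer_norm_eps=1, position_embedding_type='absolute')
attn = AlbertAttentionWithoutSkipConnection(cfg)

# (batch, seq, heads, head_dim) context, as produced before the projection.
ctx = torch.rand(2, 3, attn.num_attention_heads, attn.attention_head_size)
w = attn.dense.weight.t().view(attn.num_attention_heads,
    attn.attention_head_size, attn.hidden_size)

via_einsum = torch.einsum('bfnd,ndh->bfh', ctx, w) + attn.dense.bias
via_linear = attn.dense(ctx.reshape(2, 3, attn.hidden_size))
assert torch.allclose(via_einsum, via_linear, atol=1e-6)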
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.checkpoint from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + 
x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp7 = tmp4 + tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 + tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp3 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp7 - tmp20 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = tmp12 - tmp20 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp17 - tmp20 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = tmp31 / tmp19 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (1, 16, 4), (64, 4, 1), 0) del buf9 extern_kernels.bmm(reinterpret_tensor(buf10, (1, 16, 4), (0, 4, 1), 0), reinterpret_tensor(primals_8, (1, 4, 4), (0, 1, 4), 0), out =buf11) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_9, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_9, buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_11 return buf14, primals_9, primals_10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 
0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), buf11, reinterpret_tensor(buf10, (1, 4, 16), (64, 1, 4), 0 ), reinterpret_tensor(primals_8, (1, 4, 4), (4, 4, 1), 0) class AlbertAttentionWithoutSkipConnectionNew(nn.Module): def __init__(self, config): super().__init__() if (config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size')): raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = (config.hidden_size // config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob ) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if (self.position_embedding_type == 'relative_key' or self. position_embedding_type == 'relative_key_query'): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config. max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_8 = self.dense.weight primals_9 = self.dense.bias primals_10 = self.LayerNorm.weight primals_11 = self.LayerNorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
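With dropout disabled via eval() (both dropout probabilities are 0.5 in get_init_inputs), the compiled wrapper should agree with the eager module up to floating-point reassociation in the fused layer-norm kernels. A hedged comparison, assuming a CUDA device and both classes in scope:

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
    attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
    layer_norm_eps=1, position_embedding_type='absolute')

eager = AlbertAttentionWithoutSkipConnection(cfg).cuda().eval()
compiled = AlbertAttentionWithoutSkipConnectionNew(cfg).cuda().eval()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    y_eager = eager(x)[0]       # eager forward returns a tuple
    y_compiled = compiled(x)    # compiled forward returns the tensor directly
assert torch.allclose(y_eager, y_compiled, atol=1e-5)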
twistedcubic/attention-rank-collapse
AlbertAttentionWithoutSkipConnection
false
16,646
[ "Apache-2.0" ]
118
38b5df6dc2add25f6d945e48a6baf96862368c20
https://github.com/twistedcubic/attention-rank-collapse/tree/38b5df6dc2add25f6d945e48a6baf96862368c20
FCDiscriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ck/cckzkicqhrcfrkfdju5exjvt5jnqvtxaazswol3wnjnjike2p45t.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = 
tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 16), (16, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 1), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 return (buf1, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
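For orientation: `call` above reinterprets the (4, 4, 4) input as a (4, 16) matrix, performs a single `extern_kernels.mm` against the transposed (1, 16) weight, and then `triton_poi_fused_sigmoid_0` fuses the scalar bias add with the sigmoid. A minimal eager-mode sketch of the same dataflow (reference only, not the generated code):

import torch

def fc_discriminator_reference(x, weight, bias):
    # Shapes match the assert_size_stride guards: x (4, 4, 4), weight (1, 16), bias (1,)
    flat = x.reshape(x.size(0), -1)      # the (4, 16) reinterpret_tensor view
    logits = flat @ weight.t() + bias    # extern_kernels.mm plus the fused bias add
    return torch.sigmoid(logits)        # the body of triton_poi_fused_sigmoid_0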
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class FCDiscriminator(nn.Module): def __init__(self, options): """ The fully connected discriminator is initialized by creating a chain of fully connected layers that perform transformations n * n -> 2^(k - 1) * d -> ... -> 2 * d -> 1 where d = options.state_size k = options.discriminator_layers n = options.image_size """ super(FCDiscriminator, self).__init__() self.dropout = options.discriminator_dropout self.layers = options.discriminator_layers sizes = [options.image_size * options.image_size] size = options.state_size * 2 ** (options.discriminator_layers - 1) for i in range(options.discriminator_layers - 1): sizes.append(size) size //= 2 sizes.append(1) for i in range(options.discriminator_layers): layer = nn.Linear(sizes[i], sizes[i + 1]) self.add_module(f'linear_{i}', layer) def forward(self, x): batch_size = x.size()[0] x = x.view(batch_size, -1) layers = {} for name, module in self.named_children(): layers[name] = module for i in range(self.layers): layer = layers[f'linear_{i}'] x = layer(x) if i < self.layers - 1: x = F.leaky_relu(x, 0.2) if self.dropout is not None: x = F.dropout(x, self.dropout) return torch.sigmoid(x) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'options': _mock_config(discriminator_dropout=0.5, discriminator_layers=1, image_size=4, state_size=4)}]
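The size chain the docstring describes can be checked with a small stand-alone helper (hypothetical, but it mirrors the loop in `__init__`): with image_size n = 4, state_size d = 4 and discriminator_layers k = 3, the chain n * n -> 2^(k - 1) * d -> ... -> 2 * d -> 1 works out to 16 -> 16 -> 8 -> 1, while the single-layer configuration from `get_init_inputs` collapses to 16 -> 1.

def layer_sizes(n, d, k):
    # Mirrors FCDiscriminator.__init__: start at n*n, halve down from d * 2**(k-1), end at 1
    sizes = [n * n]
    size = d * 2 ** (k - 1)
    for _ in range(k - 1):
        sizes.append(size)
        size //= 2
    sizes.append(1)
    return sizes

assert layer_sizes(4, 4, 3) == [16, 16, 8, 1]
assert layer_sizes(4, 4, 1) == [16, 1]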
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 16), (16, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), reinterpret_tensor(primals_2, (16, 1), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1 class FCDiscriminatorNew(nn.Module): def __init__(self, options): """ The fully connected discriminator is initialized by creating a chain of fully connected layers that perform transformations n * n -> 2^(k - 1) * d -> ... -> 2 * d -> 1 where d = options.state_size k = options.discriminator_layers n = options.image_size """ super(FCDiscriminatorNew, self).__init__() self.dropout = options.discriminator_dropout self.layers = options.discriminator_layers sizes = [options.image_size * options.image_size] size = options.state_size * 2 ** (options.discriminator_layers - 1) for i in range(options.discriminator_layers - 1): sizes.append(size) size //= 2 sizes.append(1) for i in range(options.discriminator_layers): layer = nn.Linear(sizes[i], sizes[i + 1]) self.add_module(f'linear_{i}', layer) def forward(self, input_0): primals_2 = self.linear_0.weight primals_3 = self.linear_0.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
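A quick parity check between the eager module and the compiled wrapper (a sketch: it assumes a CUDA device and that the class definitions above are available in executable form; with discriminator_layers=1 the leaky-ReLU/dropout branch is never taken, so the two should agree to float tolerance):

import torch

opts = _mock_config(discriminator_dropout=0.5, discriminator_layers=1,
                    image_size=4, state_size=4)
eager = FCDiscriminator(opts).cuda()
fused = FCDiscriminatorNew(opts).cuda()
fused.load_state_dict(eager.state_dict())   # share the single linear_0 layer
x = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))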
unicredit/ganzo
FCDiscriminator
false
16,647
[ "Apache-2.0" ]
73
fb1d270f5091073e8f27da76ab508ab24e5d40e9
https://github.com/unicredit/ganzo/tree/fb1d270f5091073e8f27da76ab508ab24e5d40e9
Highway
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/v6/cv6xv4wyur6p4yumvlnpmbep5pizgsgd3nrhepr7w3wup56yxdwp.py # Topologically Sorted Source Nodes: [output, gate, mul, sub, mul_1, output_1], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # gate => sigmoid # mul => mul # mul_1 => mul_1 # output => relu # output_1 => add # sub => sub # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': 
False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp2 tmp9 = tmp6 * tmp8 tmp10 = tmp3 + tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, gate, mul, sub, mul_1, output_1], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(primals_1, buf1, buf0, buf2, 256, grid=grid(256), stream=stream0) return (buf2, primals_1, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
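The fused kernel above evaluates the entire highway combination element-wise in one pass over the 256 values: tmp2 is the sigmoid gate, tmp3 the gated input path, tmp6 the ReLU of the transform branch, and tmp10 their sum. An eager-mode reference for the same element-wise step (a sketch; `transform` and `gate_logits` stand in for buf0 and buf1, the two addmm outputs):

import torch

def highway_combine(x, transform, gate_logits):
    gate = torch.sigmoid(gate_logits)                         # tmp2
    return x * gate + torch.relu(transform) * (1.0 - gate)    # tmp3 + tmp9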
import torch from torch import nn import torch.utils.data class Highway(nn.Module): def __init__(self, input_dim, dropout): super(Highway, self).__init__() self.input_linear = nn.Linear(input_dim, input_dim) self.relu = nn.ReLU() self.gate_linear = nn.Linear(input_dim, input_dim) self.sigmoid = nn.Sigmoid() self.dropout = nn.Dropout(dropout) def forward(self, input_): input_ = self.dropout(input_) output = self.relu(self.input_linear(input_)) gate = self.sigmoid(self.gate_linear(input_)) output = input_ * gate + output * (1.0 - gate) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp2 tmp9 = tmp6 * tmp8 tmp10 = tmp3 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](primals_1, buf1, buf0, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_1, buf0, buf1 class HighwayNew(nn.Module): def __init__(self, input_dim, dropout): super(HighwayNew, self).__init__() self.input_linear = nn.Linear(input_dim, input_dim) self.relu = nn.ReLU() self.gate_linear = nn.Linear(input_dim, input_dim) self.sigmoid = nn.Sigmoid() self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.input_linear.weight primals_3 = self.input_linear.bias primals_4 = self.gate_linear.weight primals_5 = self.gate_linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
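One behavioural difference is worth flagging: the eager Highway applies nn.Dropout to its input inside forward, while the compiled `call` path in HighwayNew omits it, so the two only agree when dropout is inactive. A minimal check under that assumption (sketch; requires a CUDA device and the executable class definitions above):

import torch

model = Highway(input_dim=4, dropout=0.5).cuda().eval()   # eval() disables dropout
fused = HighwayNew(input_dim=4, dropout=0.5).cuda().eval()
fused.load_state_dict(model.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(model(x), fused(x))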
uwnlp/piqa
Highway
false
16,648
[ "Apache-2.0" ]
89
e18f2189c93965c94655d5cc943dcecdc2c1ea57
https://github.com/uwnlp/piqa/tree/e18f2189c93965c94655d5cc943dcecdc2c1ea57
Router
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py # Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone] # Source node to ATen node mapping: # u_hat => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ts/cts7q6dfb3copgexqebefx4p456ecsgth6p6xmle2mmldywndoi3.py # Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone] # Source node to ATen node mapping: # s => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp1 / tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gu/cgurb3rc57bwl3qa722rpwfklrhzbjpi2aveuapolt7tdvfpdis7.py # Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone] # Source node to ATen node mapping: # s => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = (xindex // 4) % 4 x3 = (xindex // 16) y0 = yindex x4 = xindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1) + (16*x3) + (64*x2)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4 + (64*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2t/c2t7stxsalfhsthweh45s4auxfz46xrbwkiajexefoyk5kfgiqco.py # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, v], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # pow_1 => pow_1 # s2 => sum_2 # sqrt => sqrt # truediv => div_1 # truediv_1 => div_2 # v => mul # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_7, 2), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_3 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/n3/cn3o76uutxwnkvtnqyvaxx7tnscbvuwvgsyd6kmrbucnocl6jr5o.py # Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone] # Source node to ATen node mapping: # a => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = (xindex // 4) y0 = yindex x3 = xindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3 + (64*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j7/cj7mv2k5l2kigfluq2rwwpouckm4oow7jia7wwvjogp3qlr23xwv.py # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # c_1 => amax_1, exp_1, sub_1 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/vf/cvfvkiz4k3grvhzidjc6vivbeubtw7idhhjrnb6dlbg5vf7fihed.py # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # c_1 => div_3, sum_3 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/32/c32db6tn7hvzhdl4i5logasyamysp275l3si7xspyfcfakibqvvh.py # Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm] # Source node to ATen node mapping: # s_1 => bmm_3 # Graph fragment: # %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%view_12, %view_5), kwargs = {}) triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(x0 // 4)) + (x0 % 4)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dp/cdpsbcq6jlubfjxv6az2acdud37r3h5vv43z6ggwpbd74blne64h.py # Topologically Sorted Source Nodes: [b_2, c_2], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # b_2 => add_5 # c_2 => amax_2, exp_2, sub_2, sum_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_5, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) triton_poi_fused__softmax_add_8 = async_compile.triton('triton_poi_fused__softmax_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_8', 'mutated_arg_names': [], 'no_x_dim': 
False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x0), tmp14, xmask) tl.store(out_ptr1 + (x0), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xv/cxvds37zsfn7me7vs5spzrzx36owr723owrhlinbklqa5b6xihbd.py # Topologically Sorted Source Nodes: [b_2, c_2, s_2], Original ATen: [aten.add, aten._softmax, aten.clone] # Source node to ATen node mapping: # b_2 => add_5 # c_2 => amax_2, div_6, exp_2, sub_2 # s_2 => clone_7 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_5, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_5), kwargs = {}) # %clone_7 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_27,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused__softmax_add_clone_9 = async_compile.triton('triton_poi_fused__softmax_add_clone_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clone_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z2/cz245egjx7g4j7mn3wb3p6jf5sptc7kpicetafs56orxfbjbs2fy.py # Topologically Sorted Source Nodes: [b_2, b_3, c_3], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # b_2 => add_5 # b_3 => add_8 # c_3 => amax_3, exp_3, sub_3, sum_7 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_27), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_8, [1], True), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {}) triton_poi_fused__softmax_add_10 = async_compile.triton('triton_poi_fused__softmax_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 - tmp22 tmp24 = tl_math.exp(tmp23) tmp25 = tmp9 - tmp22 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp15 - tmp22 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp21 - tmp22 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tl.store(out_ptr0 + (x0), tmp22, xmask) tl.store(out_ptr1 + (x0), tmp33, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ux/cux3mtyatkeh2xctjovuxyuzb6jrzez2afgyw5oaihrzojk4rxa5.py # Topologically Sorted Source Nodes: [b_2, b_3, c_3, s_3], Original ATen: [aten.add, aten._softmax, aten.clone] # Source node to ATen node mapping: # b_2 => add_5 # b_3 => add_8 # c_3 => div_9, exp_3, sub_3 # s_3 => clone_10 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_27), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_7), kwargs = {}) # %clone_10 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_37,), kwargs = {memory_format: torch.contiguous_format}) 
triton_poi_fused__softmax_add_clone_11 = async_compile.triton('triton_poi_fused__softmax_add_clone_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clone_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jn/cjnqdeixskhije623cnhbvm5o2lfq2nkhcelbbsiuctormqu3kg7.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] # Source node to ATen node mapping: # Graph fragment: # %permute_74 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_12, [0, 2, 1]), kwargs = {}) triton_poi_fused_transpose_12 = async_compile.triton('triton_poi_fused_transpose_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf3, 4, 64, grid=grid(4, 64), stream=stream0) buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, v], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf1, buf6, 4, 64, grid=grid(4, 64), stream=stream0) del buf1 buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), 
reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_6.run(buf8, buf9, 16, 4, grid=grid(16, 4), stream=stream0) buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm] triton_poi_fused_bmm_7.run(buf9, buf10, 64, grid=grid(64), stream=stream0) buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [pow_2, s2_1, add_3, truediv_2, add_4, sqrt_1, truediv_3, v_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [b_2, c_2], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_8.run(buf7, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [b_2, c_2, s_2], Original ATen: [aten.add, aten._softmax, aten.clone] triton_poi_fused__softmax_add_clone_9.run(buf7, buf13, buf14, buf15, buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [s_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_3, s2_2, add_6, truediv_4, add_7, sqrt_2, truediv_5, v_2], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf18, buf19, 64, grid=grid(64), stream=stream0) buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20) buf21 = buf15; del buf15 # reuse buf22 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [b_2, b_3, c_3], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_10.run(buf7, buf13, buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0); del buf13 # reuse buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), 
torch.float32) # Topologically Sorted Source Nodes: [b_2, b_3, c_3, s_3], Original ATen: [aten.add, aten._softmax, aten.clone] triton_poi_fused__softmax_add_clone_11.run(buf23, buf7, buf20, buf21, buf22, buf24, 64, grid=grid(64), stream=stream0) del buf21 del buf22 buf25 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [s_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25) buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_4, s2_3, add_9, truediv_6, add_10, sqrt_3, truediv_7, v_3], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf25, buf26, 64, grid=grid(64), stream=stream0) buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] triton_poi_fused_transpose_12.run(buf9, buf27, 16, 4, grid=grid(16, 4), stream=stream0) del buf9 return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd


class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules,
    whilst $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks
    the capsules that have a length smaller than one.
    """

    def __init__(self, epsilon=1e-08):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, s: 'torch.Tensor'):
        """
        The shape of `s` is `[batch_size, n_capsules, n_features]`
        """
        s2 = (s ** 2).sum(dim=-1, keepdims=True)
        return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))


class Router(Module):
    r"""
    ## Routing Algorithm

    This is the routing mechanism described in the paper.
    You can use multiple routing layers in your models.

    This combines calculating $\mathbf{s}_j$ for this layer and
    the routing algorithm described in *Procedure 1*.
    """

    def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
        'int', iterations: 'int'):
        """
        `in_caps` is the number of capsules, and `in_d` is the number of
        features per capsule from the layer below.
        `out_caps` and `out_d` are the same for this layer.

        `iterations` is the number of routing iterations, symbolized by $r$
        in the paper.
        """
        super().__init__()
        self.in_caps = in_caps
        self.out_caps = out_caps
        self.iterations = iterations
        self.softmax = nn.Softmax(dim=1)
        self.squash = Squash()
        self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
            out_d), requires_grad=True)

    def forward(self, u: 'torch.Tensor'):
        """
        The shape of `u` is `[batch_size, n_capsules, n_features]`.
        These are the capsules from the lower layer.
        """
        u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u)
        b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps)
        v = None
        for i in range(self.iterations):
            c = self.softmax(b)
            s = torch.einsum('bij,bijm->bjm', c, u_hat)
            v = self.squash(s)
            a = torch.einsum('bjm,bijm->bij', v, u_hat)
            b = b + a
        return v


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_caps': 4, 'out_caps': 4, 'in_d': 4, 'out_d': 4,
        'iterations': 4}]
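# --- Usage sketch (not part of the original repo): exercises the reference
# `Router` above with the shapes from `get_inputs`/`get_init_inputs`.
# Assumes the `Squash` and `Router` classes defined above are in scope.
import torch

router = Router(in_caps=4, out_caps=4, in_d=4, out_d=4, iterations=4)
u = torch.rand(4, 4, 4)  # [batch_size, n_capsules, n_features]
v = router(u)            # routed output capsules: [batch_size, out_caps, out_d]
print(v.shape)           # torch.Size([4, 4, 4])

# The squash nonlinearity bounds every output capsule's length below 1,
# since ||v|| = ||s||^2 / (1 + ||s||^2) * ||s|| / sqrt(||s||^2 + eps) < 1.
assert v.norm(dim=-1).max() < 1.0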
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp1 / tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = xindex // 4 % 4 x3 = xindex // 16 y0 = yindex x4 = xindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1 + 16 * x3 + 64 * x2), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4 + 64 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex % 4 x2 = xindex // 4 y0 = yindex x3 = xindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * x1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3 + 64 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + x0 % 4), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = 
triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp25, xmask) @triton.jit def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = triton_helpers.maximum(tmp10, tmp15) tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp16, tmp21) tmp23 = tmp4 - tmp22 tmp24 = tl_math.exp(tmp23) tmp25 = tmp9 - tmp22 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp15 - tmp22 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp21 - tmp22 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tl.store(out_ptr0 + x0, tmp22, xmask) tl.store(out_ptr1 + x0, tmp33, xmask) @triton.jit def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) 
tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_2[grid(4, 64)](buf1, buf3, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(4, 64)](buf1, buf6, 4, 64, XBLOCK=32, YBLOCK=4, num_warps=4, num_stages=1) del buf1 buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused__softmax_5[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_6[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK= 4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0) del buf8 triton_poi_fused_bmm_7[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) 
        triton_poi_fused__softmax_add_8[grid(16)](buf7, buf13, buf14,
            buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
        buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused__softmax_add_clone_9[grid(64)](buf7, buf13, buf14,
            buf15, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1),
            0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18)
        buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf18, buf19,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1),
            0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20)
        buf21 = buf15
        del buf15
        buf22 = buf14
        del buf14
        triton_poi_fused__softmax_add_10[grid(16)](buf7, buf13, buf20,
            buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0)
        del buf13
        buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused__softmax_add_clone_11[grid(64)](buf23, buf7,
            buf20, buf21, buf22, buf24, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf21
        del buf22
        buf25 = buf20
        del buf20
        extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1),
            0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25)
        buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf25, buf26,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32)
        triton_poi_fused_transpose_12[grid(16, 4)](buf9, buf27, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf9
    return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25,
        reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27,
        reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0))


class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules,
    whilst $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks
    the capsules that have a length smaller than one.
    """

    def __init__(self, epsilon=1e-08):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, s: 'torch.Tensor'):
        """
        The shape of `s` is `[batch_size, n_capsules, n_features]`
        """
        s2 = (s ** 2).sum(dim=-1, keepdims=True)
        return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))


class RouterNew(Module):
    """
    ## Routing Algorithm

    This is the routing mechanism described in the paper.
    You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and the routing algorithm described in *Procedure 1*. """ def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d: 'int', iterations: 'int'): """ `in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below. `out_caps` and `out_d` are the same for this layer. `iterations` is the number of routing iterations, symbolized by $r$ in the paper. """ super().__init__() self.in_caps = in_caps self.out_caps = out_caps self.iterations = iterations self.softmax = nn.Softmax(dim=1) self.squash = Squash() self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d, out_d), requires_grad=True) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
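# --- Equivalence sketch (not part of the original repo): checks the compiled
# `RouterNew` against the eager `Router` from the reference code above.
# Assumes both classes are importable together and a CUDA device is
# available, since `call` launches Triton kernels.
import torch

torch.manual_seed(0)
eager = Router(4, 4, 4, 4, iterations=4).cuda()
compiled = RouterNew(4, 4, 4, 4, iterations=4).cuda()
with torch.no_grad():
    compiled.weight.copy_(eager.weight)  # share the routing weights
u = torch.rand(4, 4, 4, device='cuda')
print(torch.allclose(eager(u), compiled(u), atol=1e-5))  # expected: True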
techthiyanes/annotated_deep_learning_paper_implementations
Router
false
16,649
[ "MIT" ]
3,714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
NoiseInjection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sh/cshxt5kdwvwrnmv4y7fquk3nnie6s6bpxlie6ihvmgv7xekouvha.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = 
tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x3), xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_3, primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class NoiseInjection(nn.Module): def __init__(self, channel): super().__init__() self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1)) def forward(self, image, noise): return image + self.weight * noise def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
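# --- Broadcasting sketch (not part of the original repo): the (1, C, 1, 1)
# weight scales the noise per channel. Assumes the `NoiseInjection` class
# above is in scope.
import torch

inj = NoiseInjection(channel=4)
image = torch.rand(4, 4, 4, 4)
noise = torch.rand(4, 4, 4, 4)

# The weight is initialized to zeros, so the module starts as an identity.
assert torch.equal(inj(image, noise), image)

# A single per-channel scale broadcasts over batch, height and width.
with torch.no_grad():
    inj.weight[0, 2, 0, 0] = 0.5
out = inj(image, noise)
assert torch.allclose(out[:, 2], image[:, 2] + 0.5 * noise[:, 2])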
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x3, xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class NoiseInjectionNew(nn.Module): def __init__(self, channel): super().__init__() self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1)) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
uzielroy/StyleGan_FewShot
NoiseInjection
false
16,650
[ "MIT" ]
76
94e4c49dbf39d1c6299f33787afb3e471ece11e3
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
L2Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xb/cxbfqhjndk4df3a6hrj42vxfep4zeraszq762f3l3s74bg72sk7c.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 
(4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn import torch.nn.parallel import torch.backends.cudnn import torch.distributed import torch.multiprocessing import torch.nn as nn import torch.nn.functional as F import torch.optim class L2Normalize(nn.Module): def __init__(self, dim): super(L2Normalize, self).__init__() self.dim = dim def forward(self, x): return F.normalize(x, p=2, dim=self.dim) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
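# --- Normalization sketch (not part of the original repo): shows what
# `F.normalize(x, p=2, dim=4)` computes on the 5-D input used here.
# Assumes the `L2Normalize` class above is in scope.
import torch
import torch.nn.functional as F

norm = L2Normalize(dim=4)
x = torch.rand(4, 4, 4, 4, 4)
y = norm(x)

# Every vector along dim=4 now has (near-)unit L2 norm.
print(y.norm(p=2, dim=4).min(), y.norm(p=2, dim=4).max())  # both ~1.0

# Manual equivalent, using the same 1e-12 clamp F.normalize defaults to.
manual = x / x.norm(p=2, dim=4, keepdim=True).clamp_min(1e-12)
assert torch.allclose(y, manual)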
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn import torch.nn.parallel import torch.backends.cudnn import torch.distributed import torch.multiprocessing import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class L2NormalizeNew(nn.Module): def __init__(self, dim): super(L2NormalizeNew, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
valeoai/obow
L2Normalize
false
16,651
[ "Apache-2.0" ]
84
3758504f5e058275725c35ca7faca3731572b911
https://github.com/valeoai/obow/tree/3758504f5e058275725c35ca7faca3731572b911
LDS
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hs/chsgbajkvlzt23dbj5auzazquzfdbhbhjrpqoczeg3opck4yocad.py # Topologically Sorted Source Nodes: [x_pool1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_pool1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = 
xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nr/cnrbe3nvltwdq6nzygks2nlpc4u4zxhrbnkdhor67pxgszuqoi72.py # Topologically Sorted Source Nodes: [x_pool1, x_pool2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_pool1 => _low_memory_max_pool2d_with_offsets # x_pool2 => _low_memory_max_pool2d_with_offsets_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %_low_memory_max_pool2d_with_offsets_1 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%getitem, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kx/ckxuczdcgyuqg7xr3et3ggizfcvawbmrqtlg7hde7gtvmwnnzcpu.py # Topologically Sorted Source Nodes: [x_pool3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_pool3 => getitem_4 # Graph fragment: # %getitem_4 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [x_pool1], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16384, grid=grid(16384), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_pool1, x_pool2], Original ATen: 
[aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf0, buf1, 4096, grid=grid(4096), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_pool3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf1, buf2, 1024, grid=grid(1024), stream=stream0) del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class LDS(nn.Module): def __init__(self): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) def forward(self, x): x_pool1 = self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3 def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {}]
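# --- Shape sketch (not part of the original repo): three stride-2 2x2
# max-pools downsample 64 -> 32 -> 16 -> 8 in each spatial dimension.
# Assumes the `LDS` class above is in scope.
import torch
import torch.nn.functional as F

lds = LDS()
x = torch.rand(4, 4, 64, 64)
out = lds(x)
print(out.shape)  # torch.Size([4, 4, 8, 8])

# Nested disjoint max windows compose, so a single 8x8/stride-8 pool
# agrees exactly with the cascade of three 2x2 pools.
ref = F.max_pool2d(x, kernel_size=8, stride=8)
assert torch.equal(out, ref)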
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](arg0_1, buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_1[grid(4096)](buf0, buf1, 4096, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_2[grid(1024)](buf1, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del 
buf1 return buf2, class LDSNew(nn.Module): def __init__(self): super(LDSNew, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
vaesl/LRF-Net
LDS
false
16653
[ "MIT" ]
180
e44b120dd55288c02852f8e58cda31313525d748
https://github.com/vaesl/LRF-Net/tree/e44b120dd55288c02852f8e58cda31313525d748
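Editor's note: a minimal smoke test for the record above, assuming a CUDA device (the generated call() is CUDA-only) and that the LDSNew class and its kernels are in scope. The compiled path is specialized to a contiguous (4, 4, 64, 64) float32 input, and the three fused max-pool kernels should match three eager 2x2/stride-2 max-pools exactly:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 64, 64, device='cuda')
out = LDSNew()(x)                      # runs the three Triton pooling kernels
assert out.shape == (4, 4, 8, 8)
# eager reference: the same three 2x2, stride-2 max-pools
ref = nn.Sequential(nn.MaxPool2d(2, 2), nn.MaxPool2d(2, 2), nn.MaxPool2d(2, 2))(x)
torch.testing.assert_close(out, ref)   # elementwise max is exact, so default tolerances suffice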
conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vw/cvwklowagvx5sksvhxwnsr3o6vulpztvjpqkljpmfcmgvcblg6xo.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 400, grid=grid(400), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import Variable def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class conv2d(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=4, stride=2, spectral_normed=False): super(conv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.conv = spectral_norm(self.conv) def forward(self, input): out = self.conv(input) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(400)](buf1, primals_2, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class conv2dNew(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=4, stride=2, spectral_normed=False): super(conv2dNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.conv = spectral_norm(self.conv) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
vandit15/Self-Supervised-Gans-Pytorch
conv2d
false
16654
[ "MIT" ]
66
01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
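Editor's note: a hedged usage sketch for the record above, assuming CUDA and that conv2dNew is in scope. Here call() offloads the convolution to extern_kernels.convolution and only fuses the bias add into a Triton kernel, so the output should match a plain F.conv2d with the module's own weights:

import torch
import torch.nn.functional as F

m = conv2dNew(in_channels=4, out_channels=4, padding=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = m(x)
assert y.shape == (4, 4, 5, 5)         # floor((4 + 2*4 - 4) / 2) + 1 = 5 per spatial dim
ref = F.conv2d(x, m.conv.weight, m.conv.bias, stride=2, padding=4)
torch.testing.assert_close(y, ref)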
EntropyLossEncap
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wp/cwp4ssoyg4ryz2y6xj5oqcpwqbhw5pjsafwlwacoijsmc4meri3d.py # Topologically Sorted Source Nodes: [add, log, b, sum_1, b_1, b_2], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # add => add # b => mul # b_1 => mul_1 # b_2 => mean # log => log # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, 1e-12), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %log), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_log_mean_mul_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + ((64*(r0 // 16)) + (r0 % 16)), None) tmp5 = tl.load(in_ptr0 + (16 + (64*(r0 // 16)) + (r0 % 16)), None) tmp10 = tl.load(in_ptr0 + (32 + (64*(r0 // 16)) + (r0 % 16)), None) tmp15 = tl.load(in_ptr0 + (48 + (64*(r0 // 16)) + (r0 % 16)), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp4 + tmp8 tmp11 = tmp10 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 + tmp1 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = -1.0 tmp21 = tmp19 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = 64.0 tmp26 = tmp24 / tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, log, b, sum_1, b_1, b_2], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def feature_map_permute(input): s = input.data.shape l = len(s) if l == 2: x = input elif l == 3: x = input.permute(0, 2, 1) elif l == 4: x = input.permute(0, 2, 3, 1) elif l == 5: x = input.permute(0, 2, 3, 4, 1) else: x = [] None x = x.contiguous() x = x.view(-1, s[1]) return x class EntropyLoss(nn.Module): def __init__(self, eps=1e-12): super(EntropyLoss, self).__init__() self.eps = eps def forward(self, x): b = x * torch.log(x + self.eps) b = -1.0 * b.sum(dim=1) b = b.mean() return b class EntropyLossEncap(nn.Module): def __init__(self, eps=1e-12): super(EntropyLossEncap, self).__init__() self.eps = eps self.entropy_loss = EntropyLoss(eps) def forward(self, input): score = feature_map_permute(input) ent_loss_val = self.entropy_loss(score) return ent_loss_val def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None) tmp5 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None) tmp10 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None) tmp15 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp4 + tmp8 tmp11 = tmp10 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 + tmp1 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = -1.0 tmp21 = tmp19 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = 64.0 tmp26 = tmp24 / tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_mul_sum_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, def feature_map_permute(input): s = input.data.shape l = len(s) if l == 2: x = input elif l == 3: x = input.permute(0, 2, 1) elif l == 4: x = input.permute(0, 2, 3, 1) elif l == 5: x = input.permute(0, 2, 3, 4, 1) else: x = [] None x = x.contiguous() x = x.view(-1, s[1]) return x class EntropyLoss(nn.Module): def __init__(self, eps=1e-12): super(EntropyLoss, self).__init__() self.eps = eps def forward(self, x): b = x * torch.log(x + self.eps) b = -1.0 * b.sum(dim=1) b = b.mean() return b class EntropyLossEncapNew(nn.Module): def __init__(self, eps=1e-12): super(EntropyLossEncapNew, self).__init__() self.eps = eps self.entropy_loss = EntropyLoss(eps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
vartikagpt10/memae-anomaly-detection
EntropyLossEncap
false
16655
[ "MIT" ]
297
ceece7714fb241e82ef3f3785d3d1ed86c28113e
https://github.com/vartikagpt10/memae-anomaly-detection/tree/ceece7714fb241e82ef3f3785d3d1ed86c28113e
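Editor's note: a minimal numerical check for the record above, assuming CUDA and the classes above in scope. The single persistent-reduction kernel folds the channels-last permute, the eps-shifted log, the per-position channel sum, and the final mean into one pass, so it should agree with the eager entropy formula:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
loss = EntropyLossEncapNew()(x)
# eager reference: channels last, one row per spatial position, then mean entropy
p = x.permute(0, 2, 3, 1).reshape(-1, 4)
ref = (-(p * torch.log(p + 1e-12)).sum(dim=1)).mean()
torch.testing.assert_close(loss, ref)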
deconv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [4, 4], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(4, 4), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import Variable def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class deconv2d(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=(4, 4), stride=(2, 2), spectral_normed=False, iter=1): super(deconv2d, self).__init__() self.devconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.devconv = spectral_norm(self.deconv) def forward(self, input): out = self.devconv(input) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(4, 4), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class deconv2dNew(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=(4, 4), stride=(2, 2), spectral_normed=False, iter=1): super(deconv2dNew, self).__init__() self.devconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.devconv = spectral_norm(self.deconv) def forward(self, input_0): primals_1 = self.devconv.weight primals_2 = self.devconv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
vandit15/Self-Supervised-Gans-Pytorch
deconv2d
false
16656
[ "MIT" ]
66
01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
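Editor's note: a hedged sketch for the record above, assuming CUDA and deconv2dNew in scope. It keeps the default spectral_normed=False, since the spectral_normed branch in the recorded source references self.deconv while the attribute is named devconv and would raise AttributeError. As with the conv2d record, only the bias add runs in Triton, so the result should match F.conv_transpose2d with the module's own weights:

import torch
import torch.nn.functional as F

m = deconv2dNew(in_channels=4, out_channels=4, padding=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = m(x)
assert y.shape == (4, 4, 2, 2)         # (4 - 1)*2 - 2*4 + 4 = 2 per spatial dim
ref = F.conv_transpose2d(x, m.devconv.weight, m.devconv.bias, stride=2, padding=4)
torch.testing.assert_close(y, ref)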
L1RankLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zy/czyaqaswog54k5fz5sdwhlz7hwdf6zolfgey3fz6oot4aowlu24e.py # Topologically Sorted Source Nodes: [l1_loss], Original ATen: [aten.sub, aten.abs, aten.mean] # Source node to ATen node mapping: # l1_loss => abs_1, mean, sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_mean_sub_0 = async_compile.triton('triton_per_fused_abs_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel 
= 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bl/cblu4dwr5pmmldvo2hzcn6wt6h2pqnghlksdsu4fy5g45hgsyf46.py # Topologically Sorted Source Nodes: [img_label, sub_1, abs_1, lt, sub_2, abs_2, gt, masks_hard, sub, masks, neg, preds_1, sub_3, mul_1, relu, rank_loss, sum_1, sum_2], Original ATen: [aten.repeat, aten.sub, aten.abs, aten.lt, aten.gt, aten.bitwise_and, aten.sign, aten.neg, aten.mul, aten.relu, aten.sum] # Source node to ATen node mapping: # abs_1 => abs_2 # abs_2 => abs_3 # gt => gt # img_label => repeat_1 # lt => lt # masks => sign # masks_hard => bitwise_and # mul_1 => mul_1 # neg => neg # preds_1 => repeat # rank_loss => mul_2 # relu => relu # sub => sub_1 # sub_1 => sub_2 # sub_2 => sub_3 # sub_3 => sub_4 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %repeat_1 : [num_users=4] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze_1, [256, 1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_2, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%abs_3, 0), kwargs = {}) # %bitwise_and : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%lt, %gt), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%sub_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sign,), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [256, 1]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat, %permute), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_4), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bitwise_and, %relu), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%bitwise_and,), kwargs = {}) triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1 = async_compile.triton('triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[8, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 8 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.int64) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 % 256), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + ((32*x0) + (r1 // 256)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + (r1 % 256), rmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr1 + ((32*x0) + (r1 // 256)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 0.0 tmp7 = tmp3 > tmp6 tmp8 = tmp5 & tmp7 tmp9 = tmp8.to(tl.float32) tmp10 = tl.full([1, 1], 0, tl.int32) tmp11 = tmp10 < tmp2 tmp12 = tmp11.to(tl.int8) tmp13 = tmp2 < tmp10 tmp14 = tmp13.to(tl.int8) tmp15 = tmp12 - tmp14 tmp16 = tmp15.to(tmp2.dtype) tmp17 = -tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp17 * tmp20 tmp22 = triton_helpers.maximum(tmp10, tmp21) tmp23 = tmp9 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = _tmp25 + tmp24 _tmp25 = tl.where(rmask & xmask, tmp26, _tmp25) tmp27 = tmp8.to(tl.int64) tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp25 = tl.sum(_tmp25, 1)[:, None] tl.store(out_ptr0 + (x0), tmp25, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr1 + (x0), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cu/ccuqvjfbnlmk3sbomze5yzjtuaa4yft3w3cygmwvaawn2oy5orqu.py # Topologically Sorted Source Nodes: [l1_loss, l1_loss_1, img_label, sub_1, abs_1, lt, sub_2, abs_2, gt, masks_hard, sub, 
masks, neg, preds_1, sub_3, mul_1, relu, rank_loss, sum_1, sum_2, add, rank_loss_1, mul_3, loss_total], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul, aten.repeat, aten.lt, aten.gt, aten.bitwise_and, aten.sign, aten.neg, aten.relu, aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # abs_1 => abs_2 # abs_2 => abs_3 # add => add # gt => gt # img_label => repeat_1 # l1_loss => abs_1, mean, sub # l1_loss_1 => mul # loss_total => add_1 # lt => lt # masks => sign # masks_hard => bitwise_and # mul_1 => mul_1 # mul_3 => mul_3 # neg => neg # preds_1 => repeat # rank_loss => mul_2 # rank_loss_1 => div # relu => relu # sub => sub_1 # sub_1 => sub_2 # sub_2 => sub_3 # sub_3 => sub_4 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1), kwargs = {}) # %repeat_1 : [num_users=4] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze_1, [256, 1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_2, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%abs_3, 0), kwargs = {}) # %bitwise_and : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%lt, %gt), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat_1, %permute_1), kwargs = {}) # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%sub_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sign,), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [256, 1]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%repeat, %permute), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_4), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bitwise_and, %relu), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%bitwise_and,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1), kwargs = {}) # 
%add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_3), kwargs = {}) triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2 = async_compile.triton('triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp4 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_out_ptr0 + (0)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = 1.0 tmp13 = tmp11 * tmp12 tmp14 = tmp7.to(tl.float32) tmp15 = 1e-08 tmp16 = tmp14 + tmp15 tmp17 = tmp3 / tmp16 tmp18 = tmp17 * tmp12 tmp19 = tmp13 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [l1_loss], Original ATen: [aten.sub, aten.abs, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_sub_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((8, ), (1, ), torch.float32) buf3 = empty_strided_cuda((8, ), (1, ), torch.int64) # 
Topologically Sorted Source Nodes: [img_label, sub_1, abs_1, lt, sub_2, abs_2, gt, masks_hard, sub, masks, neg, preds_1, sub_3, mul_1, relu, rank_loss, sum_1, sum_2], Original ATen: [aten.repeat, aten.sub, aten.abs, aten.lt, aten.gt, aten.bitwise_and, aten.sign, aten.neg, aten.mul, aten.relu, aten.sum] triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1.run(arg1_1, arg0_1, buf1, buf3, 8, 8192, grid=grid(8), stream=stream0) del arg0_1 del arg1_1 buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [l1_loss, l1_loss_1, img_label, sub_1, abs_1, lt, sub_2, abs_2, gt, masks_hard, sub, masks, neg, preds_1, sub_3, mul_1, relu, rank_loss, sum_1, sum_2, add, rank_loss_1, mul_3, loss_total], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul, aten.repeat, aten.lt, aten.gt, aten.bitwise_and, aten.sign, aten.neg, aten.relu, aten.sum, aten.add, aten.div] triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2.run(buf5, buf1, buf3, 1, 8, grid=grid(1), stream=stream0) del buf1 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.onnx class L1RankLoss(torch.nn.Module): """ L1 loss + Rank loss """ def __init__(self, **kwargs): super(L1RankLoss, self).__init__() self.l1_w = kwargs.get('l1_w', 1) self.rank_w = kwargs.get('rank_w', 1) self.hard_thred = kwargs.get('hard_thred', 1) self.use_margin = kwargs.get('use_margin', False) def forward(self, preds, gts): preds = preds.view(-1) gts = gts.view(-1) l1_loss = F.l1_loss(preds, gts) * self.l1_w n = len(preds) preds = preds.unsqueeze(0).repeat(n, 1) preds_t = preds.t() img_label = gts.unsqueeze(0).repeat(n, 1) img_label_t = img_label.t() masks = torch.sign(img_label - img_label_t) masks_hard = (torch.abs(img_label - img_label_t) < self.hard_thred) & ( torch.abs(img_label - img_label_t) > 0) if self.use_margin: rank_loss = masks_hard * torch.relu(torch.abs(img_label - img_label_t) - masks * (preds - preds_t)) else: rank_loss = masks_hard * torch.relu(-masks * (preds - preds_t)) rank_loss = rank_loss.sum() / (masks_hard.sum() + 1e-08) loss_total = l1_loss + rank_loss * self.rank_w return loss_total def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1( in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. constexpr, RBLOCK: tl.constexpr): xnumel = 8 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.int64) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + r1 % 256, rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (32 * x0 + r1 // 256), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + r1 % 256, rmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tl.load(in_ptr1 + (32 * x0 + r1 // 256), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 0.0 tmp7 = tmp3 > tmp6 tmp8 = tmp5 & tmp7 tmp9 = tmp8.to(tl.float32) tmp10 = tl.full([1, 1], 0, tl.int32) tmp11 = tmp10 < tmp2 tmp12 = tmp11.to(tl.int8) tmp13 = tmp2 < tmp10 tmp14 = tmp13.to(tl.int8) tmp15 = tmp12 - tmp14 tmp16 = tmp15.to(tmp2.dtype) tmp17 = -tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp17 * tmp20 tmp22 = triton_helpers.maximum(tmp10, tmp21) tmp23 = tmp9 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = _tmp25 + tmp24 _tmp25 = tl.where(rmask & xmask, tmp26, _tmp25) tmp27 = tmp8.to(tl.int64) tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp25 = tl.sum(_tmp25, 1)[:, None] tl.store(out_ptr0 + x0, tmp25, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr1 + x0, tmp29, xmask) @triton.jit def triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_out_ptr0 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = 
tl.sum(tmp5, 1)[:, None] tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = 1.0 tmp13 = tmp11 * tmp12 tmp14 = tmp7.to(tl.float32) tmp15 = 1e-08 tmp16 = tmp14 + tmp15 tmp17 = tmp3 / tmp16 tmp18 = tmp17 * tmp12 tmp19 = tmp13 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_abs_mean_sub_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((8,), (1,), torch.float32) buf3 = empty_strided_cuda((8,), (1,), torch.int64) triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1[ grid(8)](arg1_1, arg0_1, buf1, buf3, 8, 8192, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2[ grid(1)](buf5, buf1, buf3, 1, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf3 return buf5, class L1RankLossNew(torch.nn.Module): """ L1 loss + Rank loss """ def __init__(self, **kwargs): super(L1RankLossNew, self).__init__() self.l1_w = kwargs.get('l1_w', 1) self.rank_w = kwargs.get('rank_w', 1) self.hard_thred = kwargs.get('hard_thred', 1) self.use_margin = kwargs.get('use_margin', False) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
usutdzxych/CenseoQoE
L1RankLoss
false
16658
[ "BSD-3-Clause" ]
75
3f653296b223da6190e1e1781e7b9b54ff877102
https://github.com/usutdzxych/CenseoQoE/tree/3f653296b223da6190e1e1781e7b9b54ff877102
Iter_Downsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hs/chsgbajkvlzt23dbj5auzazquzfdbhbhjrpqoczeg3opck4yocad.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = 
xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nr/cnrbe3nvltwdq6nzygks2nlpc4u4zxhrbnkdhor67pxgszuqoi72.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_1 => _low_memory_max_pool2d_with_offsets # input_2 => _low_memory_max_pool2d_with_offsets_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %_low_memory_max_pool2d_with_offsets_1 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%getitem, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a5/ca5wvwrlz7kwiw2sk43efbo2ly3lro4e74prptburpb3r7vgz5zx.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x1 => getitem_4 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 9) % 9 x0 = xindex % 9 x2 = (xindex // 81) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-17) + (2*x0) + (32*x1) + (256*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-16) + (2*x0) + (32*x1) + (256*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2*x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (32*x1) + (256*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + ((2*x0) + (32*x1) + (256*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tl.store(out_ptr0 + (x4), tmp28, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/uu/cuukcqqugxvcaymcmsaim57s6wkbml34nuj53m42esdnkthr742j.py # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x2 => getitem_6 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (18*x1) + (81*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (18*x1) + (81*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (9 + (2*x0) + (18*x1) + (81*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (10 + (2*x0) + (18*x1) + (81*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3y/c3ygxtlyjq36brgglncda5thxh5xyqt3qxcovyydck2jxgv5g7f6.py # Topologically Sorted Source Nodes: [x3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x3 => getitem_8 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2*x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tl.store(out_ptr0 + (x4), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3r/c3r33rvw4v6uytwpoyuauqmxzddhilhmyqzbxnpdimrwtzxtyiw4.py # Topologically Sorted Source Nodes: [x4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x4 => getitem_10 # Graph fragment: # %getitem_10 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_5, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (9*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (9*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (3 + (9*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + (9*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16384, grid=grid(16384), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf0, buf1, 4096, grid=grid(4096), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf1, buf2, 1296, grid=grid(1296), stream=stream0) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_4.run(buf3, buf4, 144, grid=grid(144), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0) 
return (buf2, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class Iter_Downsample(nn.Module): def __init__(self): super(Iter_Downsample, self).__init__() self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2, padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0)) self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) def forward(self, x): x = self.init_ds(x) x1 = self.ds1(x) x2 = self.ds2(x1) x3 = self.ds3(x2) x4 = self.ds4(x3) return x1, x2, x3, x4 def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {}]
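A quick shape sanity check for the module above, using the MaxPool2d size rule H_out = floor((H + 2*padding - kernel_size) / stride) + 1; this is an illustrative sketch, not part of the repo:

import torch

m = Iter_Downsample()
x1, x2, x3, x4 = m(torch.rand(4, 4, 64, 64))
# init_ds: 64 -> 32 -> 16; ds1 (pad=1): 16 -> 9; ds2: 9 -> 4;
# ds3 (pad=1): 4 -> 3; ds4: 3 -> 1 -- matching the buffer shapes in call().
assert [t.shape[-1] for t in (x1, x2, x3, x4)] == [9, 4, 3, 1]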
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 9 % 9 x0 = xindex % 9 x2 = xindex // 81 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 16, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-17 + 2 * x0 + 32 * x1 + 256 * x2), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-16 + 2 * x0 + 32 * x1 + 256 * x2), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 32 * x1 + 256 * x2), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (2 * x0 + 32 * x1 + 256 * x2), tmp26 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tl.store(out_ptr0 + x4, tmp28, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel 
= 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 18 * x1 + 81 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 18 * x1 + 81 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (9 + 2 * x0 + 18 * x1 + 81 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (10 + 2 * x0 + 18 * x1 + 81 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tl.store(out_ptr0 + x4, tmp28, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](arg0_1, buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_1[grid(4096)](buf0, buf1, 4096, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) 
triton_poi_fused_max_pool2d_with_indices_2[grid(1296)](buf1, buf2, 1296, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_4[grid(144)](buf3, buf4, 144, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_5[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf2, buf3, buf4, buf5 class Iter_DownsampleNew(nn.Module): def __init__(self): super(Iter_DownsampleNew, self).__init__() self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2, padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0)) self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1], output[2], output[3]
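Assuming a CUDA device and both classes from the neighboring fields in scope, a parity check of the Triton-backed module against the eager one takes a few lines (a sketch, not part of the repo):

import torch

x = torch.rand(4, 4, 64, 64, device='cuda')
eager = Iter_Downsample()
compiled = Iter_DownsampleNew()
# call() asserts the exact input size and strides, so reuse the shape above.
for a, b in zip(eager(x), compiled(x)):
    torch.testing.assert_close(a, b)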
vaesl/LFIP
Iter_Downsample
false
16659
[ "MIT" ]
59
eb9d934616c508c9a9032f170baa1d97fa792822
https://github.com/vaesl/LFIP/tree/eb9d934616c508c9a9032f170baa1d97fa792822
_Residual_Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ca/ccalm3hakjc3y6pyouzotjhedyolmiuxj66frcn7j2bllcgh7gjc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + 
(1048576*y1)), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4v/c4vqqewff6jt6xurlq2ruxgxe6f3x2tc3oikdslqjt4spwvkx5nm.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu] # Source node to ATen node mapping: # output => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p6/cp6qjspvhx26bwsu6z65lduswm3erde6dgipulfvyjgrz5ynfyrb.py # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # output_2 => mul # output_3 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 
y1 = (yindex // 4096) tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + (256*y3)), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (y0 + (4096*x2) + (1048576*y1)), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (256, 256, 3, 3), (2304, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1024, 4096, grid=grid(1024, 4096), stream=stream0) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf4, 4194304, grid=grid(4194304), stream=stream0) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(buf5, buf0, buf6, 16384, 256, grid=grid(16384, 256), stream=stream0) del buf5 return (buf6, buf0, buf1, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _Residual_Block(nn.Module): def __init__(self): super(_Residual_Block, self).__init__() self.conv1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False) def forward(self, x): identity_data = x output = self.relu(self.conv1(x)) output = self.conv2(output) output *= 0.1 output = torch.add(output, identity_data) return output def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
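The 0.1 factor in forward is EDSR-style residual scaling, so the block computes x + 0.1 * conv2(relu(conv1(x))) with the spatial shape preserved by the 3x3/pad-1 convolutions. A small illustrative check (not from the repo):

import torch

block = _Residual_Block()
x = torch.rand(1, 256, 8, 8)  # channels must be 256; spatial size is free
y = block(x)
assert y.shape == x.shape     # kernel 3, stride 1, padding 1 preserve H, W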
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 256 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (y0 + 4096 * x2 + 1048576 * y1), tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (256, 256, 3, 3), (2304, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) 
triton_poi_fused_1[grid(65536, 9)](primals_3, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = buf3 del buf3 triton_poi_fused_relu_2[grid(4194304)](buf4, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16384, 256)](buf5, buf0, buf6, 16384, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf5 return buf6, buf0, buf1, buf2, buf4 class _Residual_BlockNew(nn.Module): def __init__(self): super(_Residual_BlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
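Note that triton_poi_fused_0 and triton_poi_fused_1 in this field only repack the activation and the two weight tensors into channels-last strides before the external convolution. An eager-mode sketch of the same layout change, checked against the strides used by buf0 and buf1/buf2 above:

import torch

x = torch.rand(4, 256, 64, 64).contiguous(memory_format=torch.channels_last)
w = torch.rand(256, 256, 3, 3).contiguous(memory_format=torch.channels_last)
assert x.stride() == (1048576, 1, 16384, 256)  # strides of buf0
assert w.stride() == (2304, 1, 768, 256)       # strides of buf1 and buf2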
twtygqyy/pytorch-EDSR
_Residual_Block
false
16661
[ "MIT" ]
59
001031b6563fcc45d4e7edb7e14c41fb9982ce64
https://github.com/twtygqyy/pytorch-EDSR/tree/001031b6563fcc45d4e7edb7e14c41fb9982ce64
Residual_D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py # Topologically Sorted Source Nodes: [out, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out => convolution # relu_1 => relu_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jo/cjoqso3hswww63ipajf5rajmdselmd7i4cc7uvpi6pg7kxrf5r65.py # Topologically Sorted Source Nodes: [out_1, out_2, add], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # add => add # out_1 => convolution_1 # out_2 => convolution_2 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %convolution_1), kwargs = {}) triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), 
padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [out_1, out_2, add], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_2.run(buf5, buf4, primals_7, primals_5, 256, grid=grid(256), stream=stream0) del buf4 del primals_5 del primals_7 return (buf5, primals_1, primals_2, primals_4, primals_6, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import Variable def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class conv2d(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=4, stride=2, spectral_normed=False): super(conv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.conv = spectral_norm(self.conv) def forward(self, input): out = self.conv(input) return out class Residual_D(nn.Module): def __init__(self, in_channels, out_channels, kernel=3, stride=1, spectral_normed=False, down_sampling=False, is_start=False): super(Residual_D, self).__init__() self.down_sampling = down_sampling self.is_start = is_start self.avgpool_short = nn.AvgPool2d(2, 2, padding=1) self.conv_short = conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, spectral_normed=False) self.conv1 = conv2d(in_channels, out_channels, spectral_normed= spectral_normed, kernel_size=kernel, stride=stride, padding=1) self.conv2 = conv2d(out_channels, out_channels, spectral_normed= spectral_normed, kernel_size=kernel, stride=stride, padding=1) self.avgpool2 = nn.AvgPool2d(2, 2, padding=1) self.relu = nn.ReLU() def forward(self, x): input = x if self.is_start: conv1 = self.relu(self.conv1(x)) conv2 = self.relu(self.conv2(conv1)) if self.down_sampling: conv2 = self.avgpool2(conv2) else: conv1 = self.conv1(self.relu(x)) conv2 = self.conv2(self.relu(conv1)) if self.down_sampling: conv2 = self.avgpool2(conv2) if self.down_sampling: input = self.avgpool_short(input) resi = self.conv_short(input) return resi + conv2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
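A minimal eager-mode sketch of the power iteration performed by SpectralNorm.compute_weight above, run for several steps on a toy matrix so the estimate converges; the shapes and seed are illustrative, not taken from the record.

import torch

torch.manual_seed(0)
weight_mat = torch.randn(8, 16)      # flattened weight: (out_channels, -1)
u = torch.randn(8, 1) * 0.1          # left-singular-vector estimate, as in apply()

for _ in range(20):                  # the module performs one step per forward pass
    v = weight_mat.t() @ u
    v = v / v.norm()
    u = weight_mat @ v
    u = u / u.norm()

sigma = (u.t() @ weight_mat @ v).squeeze()  # estimate of the top singular value
print(sigma, torch.linalg.matrix_norm(weight_mat, ord=2))  # should roughly agree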
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + x3, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf3 del buf3 triton_poi_fused_add_convolution_2[grid(256)](buf5, buf4, primals_7, primals_5, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del buf4 del primals_5 del primals_7 return buf5, primals_1, primals_2, primals_4, primals_6, buf0, buf2 def spectral_norm(module, name='weight'): SpectralNorm.apply(module, name) return module class SpectralNorm: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') u = getattr(module, self.name + '_u') size = weight.size() weight_mat = weight.contiguous().view(size[0], -1) if weight_mat.is_cuda: u = u v = weight_mat.t() @ u v = v / v.norm() u = weight_mat @ v u = u / u.norm() weight_sn = weight_mat / (u.t() @ weight_mat @ v) weight_sn = weight_sn.view(*size) return weight_sn, Variable(u.data) @staticmethod def apply(module, name): fn = SpectralNorm(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) input_size = weight.size(0) u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False) setattr(module, name + '_u', u) setattr(module, name, fn.compute_weight(module)[0]) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight_sn, u = self.compute_weight(module) setattr(module, self.name, weight_sn) setattr(module, self.name + '_u', u) class conv2d(nn.Module): def __init__(self, in_channels, out_channels, padding, kernel_size=4, stride=2, spectral_normed=False): super(conv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) if spectral_normed: self.conv = spectral_norm(self.conv) def forward(self, input): out = self.conv(input) return out class Residual_DNew(nn.Module): def __init__(self, in_channels, out_channels, kernel=3, stride=1, spectral_normed=False, down_sampling=False, is_start=False): super(Residual_DNew, self).__init__() self.down_sampling = down_sampling self.is_start = is_start self.avgpool_short = nn.AvgPool2d(2, 2, padding=1) self.conv_short = conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, spectral_normed=False) self.conv1 = conv2d(in_channels, out_channels, spectral_normed= spectral_normed, kernel_size=kernel, stride=stride, padding=1) self.conv2 = conv2d(out_channels, out_channels, spectral_normed= spectral_normed, kernel_size=kernel, stride=stride, padding=1) self.avgpool2 = nn.AvgPool2d(2, 2, padding=1) self.relu = nn.ReLU() def forward(self, input_0): primals_6 = self.conv_short.conv.weight primals_3 = self.conv_short.conv.bias primals_2 = self.conv1.conv.weight primals_5 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_7 = self.conv2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
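The compiled graph above shows a standard Inductor pattern: each convolution runs as an extern kernel with bias=None, and the bias add plus ReLU are fused into one elementwise Triton kernel (triton_poi_fused_convolution_relu_1). A small eager-mode sketch of that decomposition, with illustrative shapes:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
w = torch.randn(4, 4, 3, 3)
b = torch.randn(4)
out = F.conv2d(x, w, bias=None, padding=1)   # extern convolution, no bias
out = torch.relu(out + b.view(1, -1, 1, 1))  # the fused Triton kernel's job
ref = torch.relu(F.conv2d(x, w, bias=b, padding=1))
assert torch.allclose(out, ref, atol=1e-5)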
vandit15/Self-Supervised-Gans-Pytorch
Residual_D
false
16,663
[ "MIT" ]
66
01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
GumbelSoftmaxLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jd/cjdnaobowqernajenb4axacuvfjpuu3bnchwmpvbem4jevqy6y4v.py # Topologically Sorted Source Nodes: [indexes], Original ATen: [aten.argmax] # Source node to ATen node mapping: # indexes => argmax # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, -1), kwargs = {}) triton_poi_fused_argmax_0 = async_compile.triton('triton_poi_fused_argmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp32 
= tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x0), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/if/cifak5fetarqn6wy6kapdta3ra37zxer74aicozekjrdjtc73bfd.py # Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter] # Source node to ATen node mapping: # scatter_ => scatter_upon_const_tensor # Graph fragment: # %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [64, 4], background_val: 0.0, dtype: torch.float32, dim: 1, selector: %view_1, val: 1}) # %view_6 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_5, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_scatter_1 = async_compile.triton('triton_poi_fused_scatter_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = 
(xindex // 4) x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(in_out_ptr0 + (x4), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [indexes], Original ATen: [aten.argmax] stream0 = get_raw_stream(0) triton_poi_fused_argmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter] triton_poi_fused_scatter_1.run(buf2, buf0, 256, grid=grid(256), stream=stream0) del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.distributions import RelaxedOneHotCategorical import torch.nn.parallel import torch.utils.data import torch.distributions def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0, training: 'bool'=True, straight_through: 'bool'=False): size = logits.size() if not training: indexes = logits.argmax(dim=-1) one_hot = torch.zeros_like(logits).view(-1, size[-1]) one_hot.scatter_(1, indexes.view(-1, 1), 1) one_hot = one_hot.view(*size) return one_hot sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature ).rsample() if straight_through: size = sample.size() indexes = sample.argmax(dim=-1) hard_sample = torch.zeros_like(sample).view(-1, size[-1]) hard_sample.scatter_(1, indexes.view(-1, 1), 1) hard_sample = hard_sample.view(*size) sample = sample + (hard_sample - sample).detach() return sample class GumbelSoftmaxLayer(nn.Module): def __init__(self, temperature: 'float'=1.0, trainable_temperature: 'bool'=False, straight_through: 'bool'=False): super(GumbelSoftmaxLayer, self).__init__() self.straight_through = straight_through if not trainable_temperature: self.temperature = temperature else: self.temperature = torch.nn.Parameter(torch.tensor([temperature ]), requires_grad=True) def forward(self, logits: 'torch.Tensor'): return gumbel_softmax_sample(logits, self.temperature, self. training, self.straight_through) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
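An eager-mode restatement of the evaluation path that the two Triton kernels above implement (argmax over the last dim, then one-hot scatter); the input shape matches get_inputs() but is otherwise arbitrary:

import torch

logits = torch.rand(4, 4, 4, 4)
indexes = logits.argmax(dim=-1)                       # triton_poi_fused_argmax_0
one_hot = torch.zeros_like(logits).view(-1, logits.size(-1))
one_hot.scatter_(1, indexes.view(-1, 1), 1)           # triton_poi_fused_scatter_1
one_hot = one_hot.view_as(logits)
assert one_hot.sum(dim=-1).eq(1).all()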
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.distributions import RelaxedOneHotCategorical import torch.nn.parallel import torch.utils.data import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(in_out_ptr0 + x4, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_scatter_1[grid(256)](buf2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return buf2, def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0, training: 'bool'=True, straight_through: 'bool'=False): size = logits.size() if not training: indexes = logits.argmax(dim=-1) one_hot = torch.zeros_like(logits).view(-1, size[-1]) one_hot.scatter_(1, indexes.view(-1, 1), 1) one_hot = one_hot.view(*size) return one_hot sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature ).rsample() if straight_through: size = sample.size() indexes = sample.argmax(dim=-1) hard_sample = torch.zeros_like(sample).view(-1, size[-1]) hard_sample.scatter_(1, indexes.view(-1, 1), 1) hard_sample = hard_sample.view(*size) sample = sample + (hard_sample - sample).detach() return sample class GumbelSoftmaxLayerNew(nn.Module): def __init__(self, temperature: 'float'=1.0, trainable_temperature: 'bool'=False, straight_through: 'bool'=False): super(GumbelSoftmaxLayerNew, self).__init__() self.straight_through = straight_through if not trainable_temperature: self.temperature = temperature else: self.temperature = torch.nn.Parameter(torch.tensor([temperature ]), requires_grad=True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
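The long comparison chain in triton_poi_fused_argmax_0 (the tmpX != tmpX NaN tests interleaved with the index comparisons) reproduces torch.argmax semantics: NaN compares greater than any number, and ties keep the lowest index. A quick eager-mode check of that behavior:

import torch

x = torch.tensor([[2.0, 2.0, 1.0], [1.0, float('nan'), 3.0]])
print(torch.argmax(x, dim=-1))  # tensor([0, 1]): first maximal index on ties, NaN wins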
vengalraoguttha/EGG
GumbelSoftmaxLayer
false
16,664
[ "MIT" ]
254
e4f8412f197543ec7f1f00cf89b5a364b038dc57
https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57
ReinforcedReceiver
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4o/c4ogi2senpwcpw6txtj5wtbczsxx4tpnaiobsgnww74a4sg5v4tv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mw/cmwzknalwxktanoh44ufubtjqx2krdcfovayd5pouztckog24vas.py # 
Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_2 => gt, mul, where # Graph fragment: # %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ww/cwwjihozrbvgbotl7lstvjlcjsn7z2sg2hm2ibtgdzkrluulifjb.py # Topologically Sorted Source Nodes: [probs], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # probs => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (8, 8), (8, 1)) assert_size_stride(primals_6, (8, ), (1, )) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) # alias # Topologically Sorted Source Nodes: [embedded_bits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) # alias # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_4, buf1, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8), 0), out=buf3) buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool) buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf3, primals_6, buf4, buf5, 32, grid=grid(32), stream=stream0) del buf3 del primals_6 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source 
Nodes: [probs], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf7, primals_8, 16, grid=grid(16), stream=stream0) del primals_8 return (buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.utils.data from torch.distributions import Bernoulli import torch.distributions class ReinforcedReceiver(nn.Module): def __init__(self, n_bits, n_hidden): super(ReinforcedReceiver, self).__init__() self.emb_column = nn.Linear(n_bits, n_hidden) self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden) self.fc2 = nn.Linear(2 * n_hidden, n_bits) def forward(self, embedded_message, bits, _aux_input=None): embedded_bits = self.emb_column(bits.float()) x = torch.cat([embedded_bits, embedded_message], dim=1) x = self.fc1(x) x = F.leaky_relu(x) x = self.fc2(x) probs = x.sigmoid() distr = Bernoulli(probs=probs) entropy = distr.entropy() if self.training: sample = distr.sample() else: sample = (probs > 0.5).float() log_prob = distr.log_prob(sample).sum(dim=1) return sample, log_prob, entropy def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_bits': 4, 'n_hidden': 4}]
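Only the deterministic head (embedding, concat, fc1 + leaky_relu, fc2 + sigmoid) appears in the compiled call above; the Bernoulli sampling, log-probability, and entropy of the original forward stay in eager mode. A sketch of that stochastic tail with an illustrative probs tensor:

import torch
from torch.distributions import Bernoulli

probs = torch.sigmoid(torch.randn(4, 4))
distr = Bernoulli(probs=probs)
sample = distr.sample()                       # training path; eval uses (probs > 0.5)
log_prob = distr.log_prob(sample).sum(dim=1)
entropy = distr.entropy()
print(sample.shape, log_prob.shape, entropy.shape)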
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (8, 8), (8, 1)) assert_size_stride(primals_6, (8,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](primals_4, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool) buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(32)](buf3, primals_6, buf4, buf5, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf3 del primals_6 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8 ), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_sigmoid_2[grid(16)](buf7, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 return buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5 class 
ReinforcedReceiverNew(nn.Module): def __init__(self, n_bits, n_hidden): super(ReinforcedReceiverNew, self).__init__() self.emb_column = nn.Linear(n_bits, n_hidden) self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden) self.fc2 = nn.Linear(2 * n_hidden, n_bits) def forward(self, input_0, input_1): primals_1 = self.emb_column.weight primals_3 = self.emb_column.bias primals_5 = self.fc1.weight primals_6 = self.fc1.bias primals_7 = self.fc2.weight primals_8 = self.fc2.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1], output[2]
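In the call above, torch.cat is realized without a dedicated concatenation kernel: buf0 and buf1 are aliased views into the two halves of buf2, so the addmm and the small copy kernel (triton_poi_fused_cat_0) each write their operand straight into the joint buffer. The eager-mode equivalent of that layout trick:

import torch

a = torch.randn(4, 4)
b = torch.randn(4, 4)
buf = torch.empty(4, 8)
buf[:, :4] = a        # what addmm writes via the aliased view (buf0)
buf[:, 4:] = b        # what triton_poi_fused_cat_0 writes (buf1)
assert torch.equal(buf, torch.cat([a, b], dim=1))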
vengalraoguttha/EGG
ReinforcedReceiver
false
16,665
[ "MIT" ]
254
e4f8412f197543ec7f1f00cf89b5a364b038dc57
https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57
EntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zm/czmmjr75o6d6v6vrrwewdkwjc5mjnz33cp3minlbvl3knzangojg.py # Topologically Sorted Source Nodes: [add, log, b, sum_1, b_1, b_2], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # add => add # b => mul # b_1 => mul_1 # b_2 => mean # log => log # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-12), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_log_mean_mul_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp10 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp4 + tmp8 tmp11 = tmp10 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 + tmp1 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = -1.0 tmp21 = tmp19 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = 64.0 tmp26 = tmp24 / tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, log, b, sum_1, b_1, b_2], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class EntropyLoss(nn.Module): def __init__(self, eps=1e-12): super(EntropyLoss, self).__init__() self.eps = eps def forward(self, x): b = x * torch.log(x + self.eps) b = -1.0 * b.sum(dim=1) b = b.mean() return b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
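A quick numerical check of EntropyLoss (which expects probability-like inputs): on a uniform 4-way distribution the entropy should be log 4 ≈ 1.3863.

import torch

x = torch.full((2, 4), 0.25)                          # two uniform distributions
loss = -(x * torch.log(x + 1e-12)).sum(dim=1).mean()  # matches EntropyLoss.forward
print(loss)                                           # tensor(1.3863)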
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp5 * tmp7 tmp9 = tmp4 + tmp8 tmp11 = tmp10 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 + tmp1 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = -1.0 tmp21 = tmp19 * tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = 64.0 tmp26 = tmp24 / tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_mul_sum_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class EntropyLossNew(nn.Module): def __init__(self, eps=1e-12): super(EntropyLossNew, self).__init__() self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
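For the (4, 4, 4, 4) input, the sum over dim=1 touches elements strided 16 apart, which the persistent-reduction kernel above loads as four slabs at offsets 0, 16, 32, and 48 within each 64-element sample before reducing and dividing by 64. The same indexing in eager terms:

import torch

x = torch.rand(4, 4, 4, 4)
flat = x.reshape(4, 4, 16)                 # (batch, channel, spatial)
channel_sum = flat[:, 0] + flat[:, 1] + flat[:, 2] + flat[:, 3]
assert torch.allclose(channel_sum, x.sum(dim=1).reshape(4, 16))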
vartikagpt10/memae-anomaly-detection
EntropyLoss
false
16,666
[ "MIT" ]
297
ceece7714fb241e82ef3f3785d3d1ed86c28113e
https://github.com/vartikagpt10/memae-anomaly-detection/tree/ceece7714fb241e82ef3f3785d3d1ed86c28113e
BahdanauAttention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/7b/c7br6vyvxp4ar3p4eqjyhtqgqf2st76leiw3l2l33377wdvpxflk.py
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
#   add => add
#   tanh => tanh
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2), xmask)
    tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_tanh_0.run(buf2, primals_3, buf1, primals_5, 256, grid=grid(256), stream=stream0)
        del buf1
        del primals_3
        del primals_5
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [alignment], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf3)
    return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, primals_7, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BahdanauAttention(nn.Module):

    def __init__(self, annot_dim, query_dim, attn_dim):
        super(BahdanauAttention, self).__init__()
        self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
        self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
        self.v = nn.Linear(attn_dim, 1, bias=False)

    def forward(self, annots, query):
        """
        Shapes:
            - annots: (batch, max_time, dim)
            - query: (batch, 1, dim) or (batch, dim)
        """
        if query.dim() == 2:
            query = query.unsqueeze(1)
        processed_query = self.query_layer(query)
        processed_annots = self.annot_layer(annots)
        alignment = self.v(torch.tanh(processed_query + processed_annots))
        return alignment.squeeze(-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'annot_dim': 4, 'query_dim': 4, 'attn_dim': 4}]
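A minimal usage sketch of the reference module above (added for orientation, not part of the source record; the dimensions and shapes come from get_init_inputs()/get_inputs()):

import torch

attn = BahdanauAttention(annot_dim=4, query_dim=4, attn_dim=4)
annots = torch.rand(4, 4, 4, 4)  # toy 4D tensors, as in get_inputs()
query = torch.rand(4, 4, 4, 4)   # already >2D, so the unsqueeze branch is skipped
alignment = attn(annots, query)  # v(tanh(query_layer(query) + annot_layer(annots)))
print(alignment.shape)           # torch.Size([4, 4, 4]) after squeeze(-1)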
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(in_out_ptr0 + x2, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(256)](buf2, primals_3, buf1,
            primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_3
        del primals_5
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf3)
    return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, primals_7)


class BahdanauAttentionNew(nn.Module):

    def __init__(self, annot_dim, query_dim, attn_dim):
        super(BahdanauAttentionNew, self).__init__()
        self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
        self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
        self.v = nn.Linear(attn_dim, 1, bias=False)

    def forward(self, input_0, input_1):
        primals_2 = self.query_layer.weight
        primals_3 = self.query_layer.bias
        primals_4 = self.annot_layer.weight
        primals_5 = self.annot_layer.bias
        primals_7 = self.v.weight
        primals_1 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
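For orientation (a sketch, not part of the record; the function and argument names here are illustrative): the fused kernel triton_poi_fused_add_tanh_0 folds both Linear bias additions, the elementwise add, and the tanh into one pass over the two matmul outputs, writing the result back into the first matmul's buffer (buf2 above). Its eager-PyTorch equivalent is simply:

import torch

def fused_add_tanh(mm_query, bias_q, mm_annot, bias_a):
    # Matches the kernel body: tmp2 = mm_query + bias_q, tmp5 = mm_annot + bias_a,
    # then tanh(tmp2 + tmp5), one elementwise pass over all 256 values.
    return torch.tanh((mm_query + bias_q) + (mm_annot + bias_a))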
vigilancetrent/chatbot-advanced
BahdanauAttention
false
16667
[ "Apache-2.0" ]
52
2e0c72c4df2e1434da995b7105f8f0414aba6248
https://github.com/vigilancetrent/chatbot-advanced/tree/2e0c72c4df2e1434da995b7105f8f0414aba6248
Interpolate
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall

import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_0/inductor_cache/zk/czk6raopzmdgdv6d5hvyxoik5jp7pujsiqh34ikcwzvkv5yezwe7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
#   x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
#   %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
#   %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 1.0), kwargs = {})
#   %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
#   %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
#   %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
#   %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
#   %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
#   %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
#   %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
#   %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
#   %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4) % 4
    x0 = xindex % 4
    x2 = (xindex // 16)
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tmp11 = x0
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp12 * tmp2
    tmp14 = triton_helpers.maximum(tmp13, tmp4)
    tmp15 = tmp14.to(tl.int32)
    tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp17 = tmp15 + tmp7
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp20 = tmp19 - tmp16
    tmp21 = tmp15.to(tl.float32)
    tmp22 = tmp14 - tmp21
    tmp23 = triton_helpers.maximum(tmp22, tmp4)
    tmp24 = triton_helpers.minimum(tmp23, tmp2)
    tmp25 = tmp20 * tmp24
    tmp26 = tmp16 + tmp25
    tmp27 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
    tmp29 = tmp28 - tmp27
    tmp30 = tmp29 * tmp24
    tmp31 = tmp27 + tmp30
    tmp32 = tmp26 - tmp31
    tmp33 = tmp6.to(tl.float32)
    tmp34 = tmp5 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp4)
    tmp36 = triton_helpers.minimum(tmp35, tmp2)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tl.store(in_out_ptr0 + (x4), tmp38, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Interpolate(nn.Module):

    def __init__(self, scale_factor, mode='bilinear', align_corners=True):
        super(Interpolate, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode,
            align_corners=self.align_corners)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale_factor': 1.0}]
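A minimal usage sketch of the reference module above (added for orientation, not part of the source record): with the traced configuration scale_factor=1.0 and align_corners=True, bilinear interpolation reduces to an identity resize, since every output coordinate maps exactly onto an input pixel and the interpolation weights collapse to zero.

import torch

interp = Interpolate(scale_factor=1.0)  # mode='bilinear', align_corners=True
x = torch.rand(4, 4, 4, 4)
y = interp(x)
assert y.shape == x.shape               # output size = 4 * 1.0 = 4
assert torch.allclose(y, x)             # identity resize at scale 1.0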
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
        in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = 0.0
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tmp5.to(tl.int32)
    tmp7 = tl.full([1], 1, tl.int64)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full([1], 3, tl.int64)
    tmp10 = triton_helpers.minimum(tmp8, tmp9)
    tmp11 = x0
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp12 * tmp2
    tmp14 = triton_helpers.maximum(tmp13, tmp4)
    tmp15 = tmp14.to(tl.int32)
    tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp17 = tmp15 + tmp7
    tmp18 = triton_helpers.minimum(tmp17, tmp9)
    tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp20 = tmp19 - tmp16
    tmp21 = tmp15.to(tl.float32)
    tmp22 = tmp14 - tmp21
    tmp23 = triton_helpers.maximum(tmp22, tmp4)
    tmp24 = triton_helpers.minimum(tmp23, tmp2)
    tmp25 = tmp20 * tmp24
    tmp26 = tmp16 + tmp25
    tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp29 = tmp28 - tmp27
    tmp30 = tmp29 * tmp24
    tmp31 = tmp27 + tmp30
    tmp32 = tmp26 - tmp31
    tmp33 = tmp6.to(tl.float32)
    tmp34 = tmp5 - tmp33
    tmp35 = triton_helpers.maximum(tmp34, tmp4)
    tmp36 = triton_helpers.minimum(tmp35, tmp2)
    tmp37 = tmp32 * tmp36
    tmp38 = tmp31 + tmp37
    tl.store(in_out_ptr0 + x4, tmp38, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[
            grid(256)](buf1, arg0_1, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del arg0_1
    return buf1,


class InterpolateNew(nn.Module):

    def __init__(self, scale_factor, mode='bilinear', align_corners=True):
        super(InterpolateNew, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
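A hedged equivalence check (an addition, not part of the record; requires a CUDA device and Triton): the compiled wrapper performs the same four-corner gather and two linear interpolations as eager F.interpolate, so the two paths should agree to float tolerance on a contiguous (4, 4, 4, 4) input.

import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')  # strides (64, 16, 4, 1), as asserted in call()
    ref = F.interpolate(x, scale_factor=1.0, mode='bilinear', align_corners=True)
    out = InterpolateNew(scale_factor=1.0)(x)
    assert torch.allclose(out, ref, atol=1e-6)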
vietnhatthai/3d-vehicle-tracking
Interpolate
false
16668
[ "BSD-3-Clause" ]
603
8ee189f6792897651bb56bb2950ce07c9629a89d
https://github.com/vietnhatthai/3d-vehicle-tracking/tree/8ee189f6792897651bb56bb2950ce07c9629a89d