Dataset schema (string and sequence columns report min and max observed
lengths; int64 columns report min and max observed values):

  column                 type      min      max
  entry_point            string    1        65
  original_triton_code   string    4.5k     619k
  python_code            string    208      60.9k
  triton_code            string    1.15k    275k
  repo_name              string    7        115
  module_name            string    1        65
  synthetic              bool      1 class
  uuid                   int64     0        18.5k
  licenses               sequence  1        6
  stars                  int64     0        19.8k
  sha                    string    40       40
  repo_link              string    72       180
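Each row below pairs a PyTorch module (python_code) with the Triton code that TorchInductor generated for it: original_triton_code is the raw AOT-compiled output and triton_code is a cleaned, standalone version. A minimal loading sketch follows; it assumes the table is published as a Hugging Face dataset, and the repository ID "org/inductor-triton-pairs" is a hypothetical placeholder, not a confirmed dataset path.

# Minimal loading sketch; "org/inductor-triton-pairs" is a hypothetical
# placeholder ID, not a confirmed dataset path.
from datasets import load_dataset

ds = load_dataset("org/inductor-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])

# The cleaned kernel is a self-contained module; write it out to inspect
# or run it on a CUDA machine.
with open(f"{row['module_name']}_triton.py", "w") as f:
    f.write(row["triton_code"])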
Scaled_Dot_Product_Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/wz/cwzlgmghy6nxuchbiog4puo46i4tq7yhd3qu6ftkgjf3gwib6hxn.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention_1, context], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: dim_K
        Return:
            the context tensor produced by self-attention
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4),
            (16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
        del buf2
    return buf3,


class Scaled_Dot_Product_AttentionNew(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self):
        super(Scaled_Dot_Product_AttentionNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Ch4ndelier/Transformer_Zero_Velocity_classification
Scaled_Dot_Product_Attention
false
17,076
[ "MIT" ]
6
857efb66189c503e983c11bd7dde16ad19c51ada
https://github.com/Ch4ndelier/Transformer_Zero_Velocity_classification/tree/857efb66189c503e983c11bd7dde16ad19c51ada
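The two generated kernels realize softmax as the standard numerically stable two-pass scheme: triton_poi_fused__softmax_0 subtracts each row's maximum and exponentiates, and triton_poi_fused__softmax_1 normalizes by each row's sum. The same decomposition in plain PyTorch, as an illustrative CPU-runnable check (not part of the dataset row):

import torch

scores = torch.randn(4, 4, 4)
# Pass 1 (triton_poi_fused__softmax_0): exp(x - rowmax) for stability.
num = torch.exp(scores - scores.amax(dim=-1, keepdim=True))
# Pass 2 (triton_poi_fused__softmax_1): divide by the row sum.
out = num / num.sum(dim=-1, keepdim=True)
torch.testing.assert_close(out, torch.softmax(scores, dim=-1))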
HardSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5n/c5nj5rxxk4mfkd6svl7rhvrjhy7jprzrjnxredq5i3z22x56xmt6.py # Topologically Sorted Source Nodes: [hardtanh, add, truediv], Original ATen: [aten.hardtanh, aten.add, aten.div] # Source node to ATen node mapping: # add => add # hardtanh => clamp_max, clamp_min # truediv => div # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, -1.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2.0), kwargs = {}) triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp4 + tmp3 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hardtanh, add, truediv], Original ATen: [aten.hardtanh, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class HardSigmoid(nn.Module):

    def __init__(self):
        super(HardSigmoid, self).__init__()
        self.act = nn.Hardtanh()

    def forward(self, x):
        return (self.act(x) + 1.0) / 2.0


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -1.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 1.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tmp5 = tmp4 + tmp3
    tmp6 = 0.5
    tmp7 = tmp5 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HardSigmoidNew(nn.Module):

    def __init__(self):
        super(HardSigmoidNew, self).__init__()
        self.act = nn.Hardtanh()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Chandrima-04/gimmebio
HardSigmoid
false
17,077
[ "MIT" ]
3
cb3e66380006d5c5c00ff70bfb87317dd252c312
https://github.com/Chandrima-04/gimmebio/tree/cb3e66380006d5c5c00ff70bfb87317dd252c312
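The fused kernel is a single elementwise pass: clamp to [-1, 1] via maximum/minimum, add 1.0, and multiply by 0.5 (Inductor rewrites the division by 2.0 as a multiplication). Note that this module has slope 1/2 on [-1, 1], unlike torch.nn.Hardsigmoid, which computes relu6(x + 3) / 6. A CPU reference for the same math, illustrative only:

import torch

x = torch.randn(4, 4, 4, 4)
# clamp(x, -1, 1) * 0.5 + 0.5, exactly what the fused kernel computes.
ref = (torch.clamp(x, -1.0, 1.0) + 1.0) * 0.5
torch.testing.assert_close(ref, (torch.nn.Hardtanh()(x) + 1.0) / 2.0)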
Normalize3D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/wf/cwf4j7cstno26sspvjvpjsbtr6u2vibzphiejmdubcowupcvswt3.py # Topologically Sorted Source Nodes: [max_1, min_1, sub, sub_1, add, X_1], Original ATen: [aten.max, aten.min, aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # X_1 => div # add => add # max_1 => max_1 # min_1 => min_1 # sub => sub # sub_1 => sub_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {}) # %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %unsqueeze_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) triton_per_fused_add_div_max_min_sub_0 = async_compile.triton('triton_per_fused_add_div_max_min_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_max_min_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_max_min_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp6 = tl.where(xmask, tmp1, float("inf")) tmp7 = triton_helpers.min2(tmp6, 1)[:, None] tmp8 = tmp0 - tmp7 tmp9 = tmp4 - tmp7 tmp10 = 1e-10 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr2 + (r1 + (16*x0)), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1, min_1, sub, sub_1, add, X_1], Original ATen: [aten.max, aten.min, aten.sub, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_max_min_sub_0.run(arg0_1, buf4, 4, 16, grid=grid(4), stream=stream0) del arg0_1 return (reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Normalize3D(nn.Module):
    """
    Scale Spectrogram to be between 0 and 1
    """

    def __init__(self):
        super(Normalize3D, self).__init__()

    def forward(self, X: 'torch.Tensor'):
        if len(X.shape) != 3:
            raise ValueError(
                'Input should be 3D: [batch_size X num_features X num_steps]')
        batch_size, num_features, num_steps = X.shape
        X = X.contiguous().view(batch_size, num_features * num_steps)
        max_value = torch.max(X, dim=1)[0].detach()
        min_value = torch.min(X, dim=1)[0].detach()
        max_value = torch.unsqueeze(max_value, 1)
        min_value = torch.unsqueeze(min_value, 1)
        X = (X - min_value) / (max_value - min_value + 1e-10)
        return X.view(batch_size, num_features, num_steps)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_max_min_sub_0(in_ptr0, out_ptr2, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp6 = tl.where(xmask, tmp1, float('inf'))
    tmp7 = triton_helpers.min2(tmp6, 1)[:, None]
    tmp8 = tmp0 - tmp7
    tmp9 = tmp4 - tmp7
    tmp10 = 1e-10
    tmp11 = tmp9 + tmp10
    tmp12 = tmp8 / tmp11
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_max_min_sub_0[grid(4)](arg0_1, buf4, 4, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0),


class Normalize3DNew(nn.Module):
    """
    Scale Spectrogram to be between 0 and 1
    """

    def __init__(self):
        super(Normalize3DNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
CiscoDevNet/vo-id
Normalize3D
false
17,079
[ "MIT" ]
7
9a01f866c7539a9cd095d9627ba4f65ad540ea6b
https://github.com/CiscoDevNet/vo-id/tree/9a01f866c7539a9cd095d9627ba4f65ad540ea6b
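triton_per_fused_add_div_max_min_sub_0 is a persistent reduction: each of the 4 programs loads its sample's 16 flattened values once, takes the max and min in registers, and writes the normalized result in the same pass. The equivalent eager math, as an illustrative CPU-runnable check against this row's Normalize3D module:

import torch

X = torch.rand(4, 4, 4)
flat = X.reshape(4, -1)
lo = flat.min(dim=1, keepdim=True).values
hi = flat.max(dim=1, keepdim=True).values
ref = ((flat - lo) / (hi - lo + 1e-10)).reshape_as(X)
torch.testing.assert_close(ref, Normalize3D()(X))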
SplAtConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/z3/cz3jfcw4y2re3tdyassi7mdheikebszewzeeysm6bgphf7mwoh2i.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tz/ctzdsw3cvnseq6vyprae2elq7j7wjibso2gvtcpvvadonxe5nxlc.py # Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean] # Source node to ATen node mapping: # add => add # gap => add_1 # gap_1 => mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %getitem_5), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1, -2], True), kwargs = {}) triton_poi_fused_add_mean_1 = async_compile.triton('triton_poi_fused_add_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dh/cdhpfpcyouoqg7is5bawev566to6yzkoujcoi5uacgny26m5blrb.py # Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # gap_2 => convolution_1 # gap_3 => relu_1 # Graph fragment: # 
%convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4f/c4fvjwpx5x572kyfkojpxrrcyn6hafn35lqpnylrcya6brsbgyy4.py # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] # Source node to ATen node mapping: # atten => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ls/clsu6u2gbvexk6siba2lfnusdkjyoirqleakpytth3d7aarxr35m.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_3 => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 
32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 8) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (8*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/n4/cn4ossb3mdpsj2li5bd2q7ggfn4dciouepwnnxzf2hkyi6eppb6j.py # Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add_2 => add_2 # mul => mul # mul_1 => mul_1 # out => add_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %getitem_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_7, %getitem_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_1), kwargs = {}) triton_poi_fused_add_mul_5 = async_compile.triton('triton_poi_fused_add_mul_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf9, 32, grid=grid(32), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean] triton_poi_fused_add_mean_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [gap_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 128, grid=grid(128), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf6, primals_7, 32, grid=grid(32), stream=stream0) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 32, grid=grid(32), stream=stream0) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_5.run(buf7, buf1, buf8, 16, grid=grid(16), stream=stream0) return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv2d
from torch.nn import ReLU
from torch.nn.modules.utils import _pair


class DropBlock2D(object):

    def __init__(self, *args, **kwargs):
        raise NotImplementedError


class rSoftMax(nn.Module):

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        batch = x.size(0)
        if self.radix > 1:
            x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
            x = F.softmax(x, dim=1)
            x = x.reshape(batch, -1)
        else:
            x = torch.sigmoid(x)
        return x


class SplAtConv2d(Module):
    """Split-Attention Conv2d"""

    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2,
            reduction_factor=4, rectify=False, rectify_avg=False,
            norm_layer=None, dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            self.conv = RFConv2d(in_channels, channels * radix, kernel_size,
                stride, padding, dilation, groups=groups * radix, bias=bias,
                average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, channels * radix, kernel_size,
                stride, padding, dilation, groups=groups * radix, bias=bias,
                **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels * radix)
        self.relu = ReLU(inplace=True)
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, channels * radix, 1,
            groups=self.cardinality)
        if dropblock_prob > 0.0:
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if self.dropblock_prob > 0.0:
            x = self.dropblock(x)
        x = self.relu(x)
        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            if torch.__version__ < '1.5':
                splited = torch.split(x, int(rchannel // self.radix), dim=1)
            else:
                splited = torch.split(x, rchannel // self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            if torch.__version__ < '1.5':
                attens = torch.split(atten, int(rchannel // self.radix), dim=1)
            else:
                attens = torch.split(atten, rchannel // self.radix, dim=1)
            out = sum([(att * split) for att, split in zip(attens, splited)])
        else:
            out = atten * x
        return out.contiguous()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.nn.functional as F import torch.nn as nn from torch.nn import Conv2d from torch.nn import ReLU from torch.nn.modules.utils import _pair assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0) del buf0 buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(32)](buf1, primals_2, buf9, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mean_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(128)](buf4, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_3[grid(32)](buf6, primals_7, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(32)](buf6, buf7, 32, XBLOCK=32, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mul_5[grid(16)](buf7, buf1, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9) class DropBlock2D(object): def __init__(self, *args, **kwargs): raise NotImplementedError class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv2dNew(Module): """Split-Attention Conv2d """ def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2,
            reduction_factor=4, rectify=False, rectify_avg=False,
            norm_layer=None, dropblock_prob=0.0, **kwargs):
        super(SplAtConv2dNew, self).__init__()
        padding = _pair(padding)
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            # RFConv2d (rectified convolution) comes from the external
            # `rfconv` package used by ResNeSt; this branch is only reached
            # when rectify=True, which this record never exercises.
            self.conv = RFConv2d(in_channels, channels * radix, kernel_size,
                stride, padding, dilation, groups=groups * radix, bias=bias,
                average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, channels * radix, kernel_size,
                stride, padding, dilation, groups=groups * radix, bias=bias,
                **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels * radix)
        self.relu = ReLU(inplace=True)
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, channels * radix, 1,
            groups=self.cardinality)
        if dropblock_prob > 0.0:
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_4 = self.fc1.weight
        primals_5 = self.fc1.bias
        primals_6 = self.fc2.weight
        primals_7 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
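
# A minimal CUDA smoke test for the compiled wrapper above; a sketch only.
# The constructor arguments are inferred from the shapes asserted in call()
# (conv weight (8, 2, 4, 4) with groups=2 implies in_channels=4, channels=4,
# kernel_size=4, radix=2, groups=1) and are assumptions, not recorded
# hyperparameters. call() pins CUDA device 0, so this needs a GPU.
if torch.cuda.is_available():
    m = SplAtConv2dNew(4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = m(x)
    print(y.shape)  # torch.Size([4, 4, 1, 1]): a 4x4 kernel with no padding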
CVI-SZU/CLIMS
SplAtConv2d
false
17,080
[ "MIT" ]
4
9d3d0123b625b2c6941069e8fb359019a5cabd59
https://github.com/CVI-SZU/CLIMS/tree/9d3d0123b625b2c6941069e8fb359019a5cabd59
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kq/ckqglep2uxxmtpvso6erxkbhcljhsx4d2chzqc2ps72gtorqejiz.py # Topologically Sorted Source Nodes: [pred_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # pred_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tv/ctvxncyzu7pcogaepwuywlkrtigk7ke4tdgbkdfypvu626jp7xvj.py # Topologically Sorted Source Nodes: [pred_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # pred_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lh/clh6qtrfrv2qeyulm6shsyni25pyfyg6oy7vuy3lccexxs2s4ssp.py # Topologically Sorted Source Nodes: [mul, sum_1, mul_1, num, sum_2, sum_3, add_1, den, 
truediv, loss, loss_1, mul_2, sum_4, mul_3, num_1, sum_5, sum_6, add_5, den_1, truediv_1, loss_2, loss_3, mul_4, sum_7, mul_5, num_2, sum_8, sum_9, add_8, den_2, truediv_2, loss_4, loss_5], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # add_5 => add_5 # add_8 => add_9 # den => add_2 # den_1 => add_6 # den_2 => add_10 # loss => sub_1 # loss_1 => add_3 # loss_2 => sub_2 # loss_3 => add_7 # loss_4 => sub_3 # loss_5 => add_11 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # num => add # num_1 => add_4 # num_2 => add_8 # sum_1 => sum_2 # sum_2 => sum_3 # sum_3 => sum_4 # sum_4 => sum_5 # sum_5 => sum_6 # sum_6 => sum_7 # sum_7 => sum_8 # sum_8 => sum_9 # sum_9 => sum_10 # truediv => div_1 # truediv_1 => div_2 # truediv_2 => div_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %sum_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_3), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-05), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_3, [1]), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_6, %sum_7), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, 1e-05), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %sub_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %view_5), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%mul_4, [1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 2), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1e-05), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_4, [1]), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_5, [1]), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, %sum_10), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, 1e-05), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_3), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %sub_3), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_2 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 9, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp15 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp16 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp30 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 
tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp17 = tmp15 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp24 = tl.where(xmask, tmp22, 0) tmp25 = tl.sum(tmp24, 1)[:, None] tmp26 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp32 = tmp30 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp39 = tl.where(xmask, tmp37, 0) tmp40 = tl.sum(tmp39, 1)[:, None] tmp41 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp43 = tl.where(xmask, tmp41, 0) tmp44 = tl.sum(tmp43, 1)[:, None] tmp45 = 2.0 tmp46 = tmp6 * tmp45 tmp47 = 1e-05 tmp48 = tmp46 + tmp47 tmp49 = tmp10 + tmp14 tmp50 = tmp49 + tmp47 tmp51 = tmp48 / tmp50 tmp52 = 1.0 tmp53 = tmp52 - tmp51 tmp54 = 0.0 tmp55 = tmp53 + tmp54 tmp56 = tmp21 * tmp45 tmp57 = tmp56 + tmp47 tmp58 = tmp25 + tmp29 tmp59 = tmp58 + tmp47 tmp60 = tmp57 / tmp59 tmp61 = tmp52 - tmp60 tmp62 = tmp55 + tmp61 tmp63 = tmp36 * tmp45 tmp64 = tmp63 + tmp47 tmp65 = tmp40 + tmp44 tmp66 = tmp65 + tmp47 tmp67 = tmp64 / tmp66 tmp68 = tmp52 - tmp67 tmp69 = tmp62 + tmp68 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp69, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7y/c7yc7hszo74n4esfofyp6ygmrmijtyolamvs7562oxpqwvdxgrqr.py # Topologically Sorted Source Nodes: [mul_6, sum_10, sum_11, sum_12], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul_6 => mul_6 # sum_10 => sum_11 # sum_11 => sum_12 # sum_12 => sum_13 # Graph fragment: # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %view_7), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1]), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_6, [1]), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_7, [1]), kwargs = {}) triton_per_fused_mul_sum_3 = async_compile.triton('triton_per_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp10, xmask) tl.store(out_ptr2 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wb/cwb2m2pdcaqcokyboscssn4leosf2wraomvn2bjfxmlfnzzgwp6a.py # Topologically Sorted Source Nodes: [mul_7, num_3, add_11, den_3, truediv_3, loss_6, loss_7, loss_8, loss_9, loss_cls], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add_11 => add_13 # den_3 => add_14 # loss_6 => sub_4 # loss_7 => add_15 # loss_8 => div_5 # loss_9 => mean # loss_cls => mul_8 # mul_7 => mul_7 # num_3 => add_12 # truediv_3 => div_4 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_11, 2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1e-05), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_12, %sum_13), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, 1e-05), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %sub_4), kwargs = {}) # %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_5,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_4 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp7 = tl.load(in_ptr3 + (r0), None) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp9 = tmp8 + tmp4 tmp10 = tmp5 / tmp9 tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tmp0 + tmp12 tmp14 = 0.25 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp20 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 buf10 = empty_strided_cuda((4, ), (1, ), torch.float32) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [mul, sum_1, mul_1, num, sum_2, sum_3, add_1, den, truediv, loss, loss_1, mul_2, sum_4, mul_3, num_1, sum_5, sum_6, add_5, den_1, truediv_1, loss_2, loss_3, mul_4, sum_7, mul_5, num_2, sum_8, sum_9, add_8, den_2, truediv_2, loss_4, loss_5], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] triton_per_fused_add_div_mul_rsub_sum_2.run(buf11, buf1, arg1_1, 4, 16, grid=grid(4), stream=stream0) buf12 = empty_strided_cuda((4, ), (1, ), torch.float32) 
buf13 = empty_strided_cuda((4, ), (1, ), torch.float32) buf14 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_6, sum_10, sum_11, sum_12], Original ATen: [aten.mul, aten.sum] triton_per_fused_mul_sum_3.run(buf1, arg1_1, buf12, buf13, buf14, 4, 16, grid=grid(4), stream=stream0) del arg1_1 del buf1 buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [mul_7, num_3, add_11, den_3, truediv_3, loss_6, loss_7, loss_8, loss_9, loss_cls], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] triton_per_fused_add_div_mean_mul_rsub_4.run(buf16, buf11, buf12, buf13, buf14, 1, 4, grid=grid(1), stream=stream0) del buf11 del buf12 del buf13 del buf14 return (buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization


def binary_dice_loss(pred, label, smooth=1e-05):
    """
    :param pred: [N, *], scores in [0, 1]
    :param label: [N, *]
    :return: [N]
    """
    pred = pred.contiguous().view(pred.shape[0], -1).float()
    label = label.contiguous().view(label.shape[0], -1).float()
    num = 2 * torch.sum(torch.mul(pred, label), dim=1) + smooth
    den = torch.sum(pred, dim=1) + torch.sum(label, dim=1) + smooth
    loss = 1 - num / den
    return loss


def _make_one_hot(label, num_classes):
    """
    :param label: [N, *], values in [0, num_classes)
    :return: [N, C, *]
    """
    label = label.unsqueeze(1)
    shape = list(label.shape)
    shape[1] = num_classes
    result = torch.zeros(shape, device=label.device)
    result.scatter_(1, label, 1)
    return result


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
    return loss


def dice_loss(pred_raw, label_raw, weight=None, class_weight=None,
        reduction='mean', avg_factor=None, ignore_class=-1, smooth=1e-05):
    """
    :param pred: [N, C, *] scores without softmax
    :param label: [N, *]
    :return: reduction([N])
    """
    pred = pred_raw.clone()
    label = label_raw.clone()
    num_classes = pred.shape[1]
    if class_weight is not None:
        class_weight = class_weight.float()
    if pred.shape != label.shape:
        label = _make_one_hot(label, num_classes)
    pred = F.softmax(pred, dim=1)
    loss = 0.0
    for i in range(num_classes):
        if i != ignore_class:
            dice_loss = binary_dice_loss(pred[:, i], label[:, i], smooth)
            if class_weight is not None:
                dice_loss *= class_weight[i]
            loss += dice_loss
    if ignore_class != -1:
        num_classes -= 1
    loss = loss / num_classes
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


class DiceLoss(nn.Module):

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
            class_weight=None, loss_weight=1.0, ignore_class=-1, smooth=1e-05):
        super(DiceLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.cls_criterion = dice_loss
        self.ignore_class = ignore_class
        self.smooth = smooth

    def forward(self, cls_score, label, weight=None, avg_factor=None,
            reduction_override=None, **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else
            self.reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
            assert class_weight.shape[0] == label.shape[1], \
                'Expect weight shape [{}], got [{}]'.format(
                    label.shape[1], class_weight.shape[0])
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
            weight, class_weight=class_weight, reduction=reduction,
            avg_factor=avg_factor, ignore_class=self.ignore_class,
            smooth=self.smooth)
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
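
# A short usage sketch for the eager-mode DiceLoss above; shapes follow
# get_inputs() ([N, C, H, W] scores with same-shaped soft labels, so the
# one-hot branch is skipped and softmax is applied inside dice_loss).
# Runs on CPU; the printed value is illustrative, not a recorded result.
pred = torch.rand(4, 4, 4, 4)
label = torch.rand(4, 4, 4, 4)
criterion = DiceLoss(reduction='mean')
print(criterion(pred, label))  # scalar tensor: dice loss averaged over classes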
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp15 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp16 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp30 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp17 = tmp15 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp24 = tl.where(xmask, tmp22, 0) tmp25 = tl.sum(tmp24, 1)[:, None] tmp26 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = 
tl.sum(tmp28, 1)[:, None] tmp32 = tmp30 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp39 = tl.where(xmask, tmp37, 0) tmp40 = tl.sum(tmp39, 1)[:, None] tmp41 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp43 = tl.where(xmask, tmp41, 0) tmp44 = tl.sum(tmp43, 1)[:, None] tmp45 = 2.0 tmp46 = tmp6 * tmp45 tmp47 = 1e-05 tmp48 = tmp46 + tmp47 tmp49 = tmp10 + tmp14 tmp50 = tmp49 + tmp47 tmp51 = tmp48 / tmp50 tmp52 = 1.0 tmp53 = tmp52 - tmp51 tmp54 = 0.0 tmp55 = tmp53 + tmp54 tmp56 = tmp21 * tmp45 tmp57 = tmp56 + tmp47 tmp58 = tmp25 + tmp29 tmp59 = tmp58 + tmp47 tmp60 = tmp57 / tmp59 tmp61 = tmp52 - tmp60 tmp62 = tmp55 + tmp61 tmp63 = tmp36 * tmp45 tmp64 = tmp63 + tmp47 tmp65 = tmp40 + tmp44 tmp66 = tmp65 + tmp47 tmp67 = tmp64 / tmp66 tmp68 = tmp52 - tmp67 tmp69 = tmp62 + tmp68 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp69, xmask) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) tl.store(out_ptr2 + x0, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp7 = tl.load(in_ptr3 + r0, None) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp9 = tmp8 + tmp4 tmp10 = tmp5 / tmp9 tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tmp0 + tmp12 tmp14 = 0.25 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = tmp20 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf10 = empty_strided_cuda((4,), (1,), 
torch.float32) buf11 = buf10 del buf10 triton_per_fused_add_div_mul_rsub_sum_2[grid(4)](buf11, buf1, arg1_1, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf12 = empty_strided_cuda((4,), (1,), torch.float32) buf13 = empty_strided_cuda((4,), (1,), torch.float32) buf14 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_3[grid(4)](buf1, arg1_1, buf12, buf13, buf14, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf1 buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15 del buf15 triton_per_fused_add_div_mean_mul_rsub_4[grid(1)](buf16, buf11, buf12, buf13, buf14, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf11 del buf12 del buf13 del buf14 return buf16, def binary_dice_loss(pred, label, smooth=1e-05): """ :param pred: [N, *]: here should be scores in [0,1] :param label: [N, *] :param power: 1 for abs, 2 for square :return: [N] """ pred = pred.contiguous().view(pred.shape[0], -1).float() label = label.contiguous().view(label.shape[0], -1).float() num = 2 * torch.sum(torch.mul(pred, label), dim=1) + smooth den = torch.sum(pred, dim=1) + torch.sum(label, dim=1) + smooth loss = 1 - num / den return loss def _make_one_hot(label, num_classes): """ :param label: [N, *], values in [0,num_classes) :return: [N, C, *] """ label = label.unsqueeze(1) shape = list(label.shape) shape[1] = num_classes result = torch.zeros(shape, device=label.device) result.scatter_(1, label, 1) return result def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. 
""" if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def dice_loss(pred_raw, label_raw, weight=None, class_weight=None, reduction='mean', avg_factor=None, ignore_class=-1, smooth=1e-05): """ :param pred: [N, C, *]scores without softmax :param label: [N, *] :return: reduction([N]) """ pred = pred_raw.clone() label = label_raw.clone() num_classes = pred.shape[1] if class_weight is not None: class_weight = class_weight.float() if pred.shape != label.shape: label = _make_one_hot(label, num_classes) pred = F.softmax(pred, dim=1) loss = 0.0 for i in range(num_classes): if i != ignore_class: dice_loss = binary_dice_loss(pred[:, i], label[:, i], smooth) if class_weight is not None: dice_loss *= class_weight[i] loss += dice_loss if ignore_class != -1: num_classes -= 1 loss = loss / num_classes loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class DiceLossNew(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', class_weight=None, loss_weight=1.0, ignore_class=-1, smooth=1e-05): super(DiceLossNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.cls_criterion = dice_loss self.ignore_class = ignore_class self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CVIU-CSU/M2MRF-Lesion-Segmentation
DiceLoss
false
17,081
[ "Apache-2.0" ]
10
13af87927f4cdeca70e35d570edd1aec43b387b6
https://github.com/CVIU-CSU/M2MRF-Lesion-Segmentation/tree/13af87927f4cdeca70e35d570edd1aec43b387b6
GlobalAveragePooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/e7/ce73hyb6fl47lsvuo6oc4nyc7nbjn2cooo36plrte4gsotp7fcxm.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class GlobalAveragePooling(nn.Module):

    def __init__(self):
        super(GlobalAveragePooling, self).__init__()

    def forward(self, feat):
        num_channels = feat.size(1)
        return F.avg_pool2d(feat, (feat.size(2), feat.size(3))).view(-1,
            num_channels)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
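
# A short sketch on the shapes from get_inputs(): pooling over the full
# 4x4 spatial extent is just a per-channel mean, which gives a cheap
# correctness check on CPU.
feat = torch.rand(4, 4, 4, 4)
pooled = GlobalAveragePooling()(feat)
print(pooled.shape)                                   # torch.Size([4, 4])
print(torch.allclose(pooled, feat.mean(dim=(2, 3))))  # True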
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalAveragePoolingNew(nn.Module): def __init__(self): super(GlobalAveragePoolingNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
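
# A hedged CUDA check: the fused kernel above sums the 16 spatial elements
# per (batch, channel) pair and scales by 0.0625, so it should match the
# eager per-channel mean.
if torch.cuda.is_available():
    feat = torch.rand(4, 4, 4, 4, device='cuda')
    pooled = GlobalAveragePoolingNew()(feat)
    print(torch.allclose(pooled, feat.mean(dim=(2, 3))))  # expected True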
CVPR2020/EnAET
GlobalAveragePooling
false
17,082
[ "MIT" ]
3
f490777980d20c68ca63764b7fc25537d7e72660
https://github.com/CVPR2020/EnAET/tree/f490777980d20c68ca63764b7fc25537d7e72660
Rescale
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/jv/cjvydnuys7zy6wmxieegntfvfdzw5o74mw7dzc5gjcud6ph5j5ko.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul] # Source node to ATen node mapping: # x => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
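Editor's note on the index arithmetic in triton_poi_fused_mul_0 above: for the contiguous (4, 4, 4, 4) input with strides (64, 16, 4, 1), a flat index decomposes as n*64 + c*16 + h*4 + w, so `x1 = xindex // 16 % 4` recovers the channel used to select the per-channel weight. A minimal sketch (not part of the record) verifying the decomposition:

import torch

# Flat index -> (n, c, h, w) for a contiguous (4, 4, 4, 4) tensor;
# c = i // 16 % 4 is exactly the index the fused kernel computes.
x = torch.arange(256).reshape(4, 4, 4, 4)
for i in (0, 17, 250):
    n, c, h, w = i // 64, i // 16 % 4, i // 4 % 4, i % 4
    assert x[n, c, h, w].item() == i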
import torch import torch.nn as nn import torch.utils.data class Rescale(nn.Module): """Per-channel rescaling. Need a proper `nn.Module` so we can wrap it with `torch.nn.utils.weight_norm`. Args: num_channels (int): Number of channels in the input. """ def __init__(self, num_channels): super(Rescale, self).__init__() self.weight = nn.Parameter(torch.ones(num_channels, 1, 1)) def forward(self, x): x = self.weight * x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
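The docstring's stated rationale (Rescale must be a proper nn.Module so it can be wrapped with torch.nn.utils.weight_norm) can be exercised directly. A minimal sketch, assuming the Rescale class above is in scope:

import torch
import torch.nn as nn

# Wrapping registers weight_g / weight_v and recomputes `weight` from them
# via a forward pre-hook on every call.
rescale = nn.utils.weight_norm(Rescale(num_channels=4), name='weight')
y = rescale(torch.rand(4, 4, 4, 4))
print(y.shape)  # torch.Size([4, 4, 4, 4])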
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class RescaleNew(nn.Module): """Per-channel rescaling. Need a proper `nn.Module` so we can wrap it with `torch.nn.utils.weight_norm`. Args: num_channels (int): Number of channels in the input. """ def __init__(self, num_channels): super(RescaleNew, self).__init__() self.weight = nn.Parameter(torch.ones(num_channels, 1, 1)) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
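Since the Triton path only fuses a broadcasted multiply, it is easy to sanity-check against eager execution. A minimal sketch (editor's addition), assuming a CUDA device and that RescaleNew from the module above is importable:

import torch

# The compiled forward should match the eager per-channel multiply it
# replaces: weight has shape (C, 1, 1) and broadcasts over H and W.
m = RescaleNew(num_channels=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m(x), m.weight * x)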
Catherine0505/mar-scf-flow
Rescale
false
17083
[ "Apache-2.0" ]
10
aa7c3564cb9f2967c5e580a633516dba1b597f98
https://github.com/Catherine0505/mar-scf-flow/tree/aa7c3564cb9f2967c5e580a633516dba1b597f98
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ag/caggqh3nrqirp3azykpm6daivhojwggbhob6pnubsheittrjeiky.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qw/cqw2ahskucrmwkbadqswj76db4kfcxn5w76kji7peia7kwu4zbkq.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qp/cqpcw6a75lddrj3p3ak2pp34lgd6te4lx66bbp6aivrrp4vkrped.py # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_2 => mul_2 # weight => mul_3 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yt/cytz7gc3g33dnf27y4cxja7oo23yxivujqbpdwfrcbef5wkinklz.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] # Source node to ATen node mapping: # out_3 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, 
)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf5, primals_6, 192, grid=grid(192), stream=stream0) del primals_6 return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
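Editor's note: the literal constants baked into the three mul kernels above are the equalized-learning-rate scales from the module definitions that follow. A minimal sketch deriving them for this record's config (in_channel=4, style_dim=4, kernel_size=1, lr_mul=1.0):

import math

style_dim, in_channel, kernel_size, lr_mul = 4, 4, 1, 1.0
# triton_poi_fused_mul_0: EqualLinear.scale = 1 / sqrt(in_dim) * lr_mul
print(1 / math.sqrt(style_dim) * lr_mul)             # 0.5
# triton_poi_fused_mul_1: bias * lr_mul
print(lr_mul)                                        # 1.0
# triton_poi_fused_mul_2: ModulatedConv2d.scale = 1 / sqrt(fan_in)
print(1 / math.sqrt(in_channel * kernel_size ** 2))  # 0.5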
from torch.autograd import Function import math import torch from torch.nn import functional as F from torch import nn def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def downsample(images, size=256): if images.shape[2] > size: factor = images.shape[2] // size assert factor * size == images.shape[2] images = images.view([-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = images.mean(dim=[3, 5]) return images else: assert images.shape[-1] == 256 return images def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.reshape(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
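ModulatedConv2d.forward applies a different weight to every sample by folding the batch into the channel dimension and convolving with groups=batch; the generated call above does the same thing via extern_kernels.convolution(..., groups=4). A minimal sketch of the equivalence (illustrative shapes, editor's addition, not from the record):

import torch
import torch.nn.functional as F

batch, cin, cout, k, h = 4, 4, 3, 1, 4
x = torch.rand(batch, cin, h, h)
w = torch.rand(batch, cout, cin, k, k)  # one weight tensor per sample
# Reference: convolve each sample with its own weight.
ref = torch.cat([F.conv2d(x[i:i + 1], w[i]) for i in range(batch)])
# Grouped-conv trick: fold batch into channels, use groups=batch.
out = F.conv2d(x.reshape(1, batch * cin, h, h),
               w.reshape(batch * cout, cin, k, k), groups=batch)
assert torch.allclose(out.reshape(batch, cout, h, h), ref, atol=1e-6)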
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch.nn import functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def downsample(images, size=256): if images.shape[2] > size: factor = images.shape[2] // size assert factor * size == images.shape[2] images = images.view([-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = images.mean(dim=[3, 5]) return images else: assert images.shape[-1] == 256 return images def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.reshape(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
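Editor's note on triton_poi_fused_add_3 above: it is the broadcasted bias add `out + self.bias` flattened over 192 elements, where `x1 = xindex // 16 % 3` is the RGB channel of the flat index. A minimal sketch of that pairing (not part of the record):

import torch

# Adding a (1, 3, 1, 1) bias to a (4, 3, 4, 4) output: element i of the
# flattened result pairs with bias channel i // 16 % 3.
out = torch.rand(4, 3, 4, 4)
bias = torch.rand(1, 3, 1, 1)
flat = (out + bias).reshape(-1)
for i in (0, 50, 191):
    assert flat[i] == out.reshape(-1)[i] + bias.reshape(-1)[i // 16 % 3]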
BillyXYB/TransEditor
ToRGB
false
17084
[ "MIT" ]
4
0194cd6f0e96c801d55c0cb9683e1f552bcf6d48
https://github.com/BillyXYB/TransEditor/tree/0194cd6f0e96c801d55c0cb9683e1f552bcf6d48
BinaryLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/7s/c7sgntrkx7eolttcy4chpwzcn4u5uahelojkqdaozkcw7aximbml.py # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.binary_cross_entropy, aten.mean] # Source node to ATen node mapping: # loss => full_default, full_default_1, log, log1p, maximum, maximum_1, mul, mul_1, neg, sub, sub_1 # loss_1 => mean # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_1, [1, 2]), kwargs = {}) triton_per_fused_binary_cross_entropy_mean_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/d4/cd4cs4sumcwo4y7fjw3vo4qui7uwxm52t6gvizyk3wvbrxvbe3fs.py # Topologically Sorted Source Nodes: [loss_3, loss_4], Original ATen: [aten.binary_cross_entropy, aten.mean] # Source node to ATen node mapping: # loss_3 => full_default_2, full_default_3, log1p_1, log_1, maximum_2, maximum_3, mul_2, mul_3, neg_1, sub_2, sub_3 # loss_4 => mean_1 # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_3, 1), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_2,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_1,), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_2 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_1, %full_default_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %maximum_2), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_2,), kwargs = {}) # %full_default_3 : [num_users=1] = 
call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_3 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_1, %full_default_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %maximum_3), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_3, [1, 2]), kwargs = {}) triton_per_fused_binary_cross_entropy_mean_1 = async_compile.triton('triton_per_fused_binary_cross_entropy_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/y4/cy4wchplhpqh2b5xlnty3wplc5rp4mg2syyk7qcu5iubyxvr4bu5.py # Topologically Sorted Source Nodes: [loss_6, loss_7], Original ATen: [aten.binary_cross_entropy, aten.mean] # Source node to ATen node mapping: # loss_6 => full_default_4, full_default_5, log1p_2, log_2, maximum_4, maximum_5, mul_4, 
mul_5, neg_2, sub_4, sub_5 # loss_7 => mean_2 # Graph fragment: # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_5, 1), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_4,), kwargs = {}) # %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_2,), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_4 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_2, %full_default_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %maximum_4), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_4,), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_5 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_2, %full_default_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %maximum_5), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_5, [1, 2]), kwargs = {}) triton_per_fused_binary_cross_entropy_mean_2 = async_compile.triton('triton_per_fused_binary_cross_entropy_mean_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mean_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_mean_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6y/c6yzbmpj3ox2t6tnlrvknh4t34veimklgekk34gaa5747x4ukefx.py # Topologically Sorted Source Nodes: [loss_9, loss_10], Original ATen: [aten.binary_cross_entropy, aten.mean] # Source node to ATen node mapping: # loss_10 => mean_3 # loss_9 => full_default_6, full_default_7, log1p_3, log_3, maximum_6, maximum_7, mul_6, mul_7, neg_3, sub_6, sub_7 # Graph fragment: # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_7, 1), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_6,), kwargs = {}) # %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_3,), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_6 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_3, %full_default_6), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %maximum_6), kwargs = {}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_6,), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_7 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_3, %full_default_7), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %maximum_7), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_7, [1, 2]), kwargs = {}) triton_per_fused_binary_cross_entropy_mean_3 = async_compile.triton('triton_per_fused_binary_cross_entropy_mean_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/36/c36v75w6bb3r7j3tkembreqcjyab2ddgodhxoxcjdcf5ntdtsmsc.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9, loss_10, loss_11, loss_12, loss_13, loss_cls], Original ATen: [aten.binary_cross_entropy, aten.mean, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # loss => full_default, full_default_1, log, log1p, maximum, maximum_1, mul, mul_1, neg, sub, sub_1 # loss_1 => mean # loss_10 => mean_3 # loss_11 => add_3 # loss_12 => div # loss_13 => mean_4 # loss_2 => add # loss_3 => full_default_2, full_default_3, log1p_1, log_1, maximum_2, maximum_3, mul_2, mul_3, neg_1, sub_2, sub_3 # loss_4 => mean_1 # loss_5 => add_1 # loss_6 => full_default_4, full_default_5, log1p_2, log_2, maximum_4, maximum_5, mul_4, mul_5, neg_2, sub_4, sub_5 # loss_7 => mean_2 # loss_8 => add_2 # loss_9 => full_default_6, full_default_7, log1p_3, log_3, maximum_6, maximum_7, mul_6, mul_7, neg_3, sub_6, sub_7 # loss_cls => mul_8 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = 
(%select,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_1, [1, 2]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0.0), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_3, 1), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_2,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_1,), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_2 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_1, %full_default_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %maximum_2), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_2,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_3 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_1, %full_default_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %maximum_3), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_3, [1, 2]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mean_1), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_5, 1), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_4,), kwargs = {}) # %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_2,), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_4 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_2, %full_default_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %maximum_4), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_4,), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) 
# %maximum_5 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_2, %full_default_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %maximum_5), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_5, [1, 2]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mean_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_7, 1), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_6,), kwargs = {}) # %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg_3,), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_6 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p_3, %full_default_6), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %maximum_6), kwargs = {}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%select_6,), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_7 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log_3, %full_default_7), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %maximum_7), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_7, [1, 2]), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mean_3), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4), kwargs = {}) # %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_4, 1.0), kwargs = {}) triton_per_fused_add_binary_cross_entropy_div_mean_mul_4 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_div_mean_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 
3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_div_mean_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mean_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_ptr2 + (r0), None) tmp11 = tl.load(in_ptr3 + (r0), None) tmp1 = 16.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp6 = tmp5 / tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 / tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 / tmp1 tmp13 = tmp10 + tmp12 tmp14 = 0.25 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = 1.0 tmp22 = tmp20 * tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.binary_cross_entropy, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_mean_0.run(arg1_1, arg0_1, buf0, 4, 16, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss_3, loss_4], Original ATen: [aten.binary_cross_entropy, aten.mean] triton_per_fused_binary_cross_entropy_mean_1.run(arg1_1, arg0_1, buf1, 4, 16, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss_6, loss_7], Original ATen: [aten.binary_cross_entropy, aten.mean] triton_per_fused_binary_cross_entropy_mean_2.run(arg1_1, arg0_1, buf2, 4, 16, grid=grid(4), stream=stream0) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [loss_9, loss_10], Original ATen: [aten.binary_cross_entropy, aten.mean] triton_per_fused_binary_cross_entropy_mean_3.run(arg1_1, arg0_1, buf3, 4, 16, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [loss, loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9, loss_10, loss_11, loss_12, loss_13, loss_cls], Original ATen: [aten.binary_cross_entropy, aten.mean, aten.add, aten.div, aten.mul] 
triton_per_fused_add_binary_cross_entropy_div_mean_mul_4.run(buf5, buf0, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
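# NOTE (editor's sketch, not part of the generated output): each of the four
# triton_per_fused_binary_cross_entropy_mean_* kernels above reduces one
# channel slice (base offsets 0, 16, 32, 48) of the flattened [4, 4, 4, 4]
# inputs. Per element they compute the clamped binary cross entropy spelled
# out by the ATen graph fragments:
#   loss = (y - 1) * max(log1p(-p), -100) - y * max(log(p), -100),  p = sigmoid(x)
# A minimal eager reference for one slice (the function name is illustrative;
# the kernels store the per-sample sum, and the final kernel divides by 16):
import torch

def bce_channel_mean_reference(logits, labels, channel):
    # logits, labels: [N, C, H, W]; mirrors one fused kernel plus its /16
    p = torch.sigmoid(logits[:, channel])
    y = labels[:, channel]
    loss = (y - 1) * torch.clamp(torch.log1p(-p), min=-100) \
        - y * torch.clamp(torch.log(p), min=-100)
    return loss.mean(dim=(1, 2))  # one value per batch element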
import torch import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization def binary_cbce_loss(pred, label, **kwargs): """ :param pred: [N, *]: here should be scores in [0,1] :param label: [N, *]: values in [0,1] :return: [N] """ mask = (label > 0.5).float() b, h, w = mask.shape num_pos = torch.sum(mask, dim=[1, 2]).float() num_neg = h * w - num_pos weight = torch.zeros_like(mask) pos_weight = num_neg / (num_pos + num_neg) neg_weight = num_pos / (num_pos + num_neg) for i in range(b): weight[i][label[i] > 0.5] = pos_weight[i] weight[i][label[i] <= 0.5] = neg_weight[i] loss = torch.nn.functional.binary_cross_entropy(pred.float(), label. float(), weight=weight, reduction='none') return loss def binary_ce_loss(pred, label, **kwargs): loss = F.binary_cross_entropy(pred, label, reduction='none') loss = torch.mean(loss, dim=(1, 2)) return loss def binary_dice_loss(pred, label, smooth=1e-05): """ :param pred: [N, *]: here should be scores in [0,1] :param label: [N, *] :param power: 1 for abs, 2 for square :return: [N] """ pred = pred.contiguous().view(pred.shape[0], -1).float() label = label.contiguous().view(label.shape[0], -1).float() num = 2 * torch.sum(torch.mul(pred, label), dim=1) + smooth den = torch.sum(pred, dim=1) + torch.sum(label, dim=1) + smooth loss = 1 - num / den return loss def binary_ce_dice_loss(pred, label, smooth=1.0, **kwargs): loss1 = binary_ce_loss(pred, label, **kwargs) loss2 = binary_dice_loss(pred, label, smooth=smooth) return loss1 + loss2 def _make_one_hot(label, num_classes): """ :param label: [N, *], values in [0,num_classes) :return: [N, C, *] """ label = label.unsqueeze(1) shape = list(label.shape) shape[1] = num_classes result = torch.zeros(shape, device=label.device) result.scatter_(1, label, 1) return result def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values.
""" if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_loss(pred_raw, label_raw, loss_func, weight=None, class_weight= None, class_weight_norm=False, reduction='mean', avg_factor=None, smooth=1.0, **kwargs): """ :param pred: [N, C, *] scores without softmax :param label: [N, *] in [0, C], 0 stands for background, 1~C stands for pred in 0~C-1 :return: reduction([N]) """ pred = pred_raw.clone() label = label_raw.clone() num_classes = pred.shape[1] if class_weight is not None: class_weight = class_weight.float() if pred.shape != label.shape: label = _make_one_hot(label, num_classes) pred = torch.sigmoid(pred) loss = 0.0 for i in range(num_classes): if isinstance(loss_func, tuple): loss_function = loss_func[i] else: loss_function = loss_func class_loss = loss_function(pred[:, i], label[:, i], smooth=smooth) if class_weight is not None: class_loss *= class_weight[i] loss += class_loss if class_weight is not None and class_weight_norm: loss = loss / torch.sum(class_weight) else: loss = loss / num_classes loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class BinaryLoss(nn.Module): def __init__(self, loss_type='ce', reduction='mean', class_weight=None, class_weight_norm=False, loss_weight=1.0, smooth=1.0, **kwargs): super(BinaryLoss, self).__init__() assert loss_type in ['ce', 'dice', 'cbce', 'ce_dice', 'mix'] self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.class_weight_norm = class_weight_norm self.loss_type = loss_type self.smooth = smooth def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.class_weight is not None: class_weight = cls_score.new_tensor(self.class_weight) assert class_weight.shape[0] == cls_score.shape[1 ], 'Expect weight shape [{}], get[{}]'.format(cls_score. shape[1], class_weight.shape[0]) else: class_weight = None loss_func = None if self.loss_type == 'ce': loss_func = binary_ce_loss elif self.loss_type == 'dice': loss_func = binary_dice_loss elif self.loss_type == 'ce_dice': loss_func = binary_ce_dice_loss elif self.loss_type == 'mix': loss_func = (binary_ce_loss, binary_ce_loss, binary_ce_loss, binary_dice_loss) elif self.loss_type == 'cbce': loss_func = binary_cbce_loss loss_cls = self.loss_weight * binary_loss(cls_score, label, loss_func, weight, class_weight=class_weight, class_weight_norm =self.class_weight_norm, reduction=reduction, avg_factor= avg_factor, smooth=self.smooth) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_per_fused_binary_cross_entropy_mean_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_per_fused_binary_cross_entropy_mean_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_per_fused_binary_cross_entropy_mean_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mean_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_ptr2 + r0, None) tmp11 = tl.load(in_ptr3 + r0, None) tmp1 = 16.0 tmp2 = tmp0 / tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp6 = tmp5 / tmp1 tmp7 = tmp4 + tmp6 tmp9 = tmp8 / tmp1 tmp10 = tmp7 + tmp9 tmp12 = tmp11 / tmp1 tmp13 = tmp10 + tmp12 tmp14 = 0.25 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = 1.0 tmp22 = tmp20 * tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_mean_0[grid(4)](arg1_1, arg0_1, buf0, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_binary_cross_entropy_mean_1[grid(4)](arg1_1, arg0_1, buf1, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_binary_cross_entropy_mean_2[grid(4)](arg1_1, arg0_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_binary_cross_entropy_mean_3[grid(4)](arg1_1, arg0_1, buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_add_binary_cross_entropy_div_mean_mul_4[grid(1)](buf5, buf0, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 del buf3 return buf5, def binary_cbce_loss(pred, label, **kwargs): """ :param pred: [N, *]: here should be scores in [0,1] :param label: [N, *]: values in [0,1] :return: [N] """ mask = (label > 0.5).float() b, h, w = mask.shape num_pos = torch.sum(mask, dim=[1, 2]).float() num_neg = h * w - num_pos weight = torch.zeros_like(mask) pos_weight = num_neg / (num_pos + num_neg) neg_weight = num_pos / (num_pos + num_neg) for i in range(b): weight[i][label[i] > 0.5] = pos_weight[i] weight[i][label[i] <= 0.5] = neg_weight[i] loss = torch.nn.functional.binary_cross_entropy(pred.float(), label. 
float(), weight=weight, reduction='none') return loss def binary_ce_loss(pred, label, **kwargs): loss = F.binary_cross_entropy(pred, label, reduction='none') loss = torch.mean(loss, dim=(1, 2)) return loss def binary_dice_loss(pred, label, smooth=1e-05): """ :param pred: [N, *]: here should be scores in [0,1] :param label: [N, *] :param power: 1 for abs, 2 for square :return: [N] """ pred = pred.contiguous().view(pred.shape[0], -1).float() label = label.contiguous().view(label.shape[0], -1).float() num = 2 * torch.sum(torch.mul(pred, label), dim=1) + smooth den = torch.sum(pred, dim=1) + torch.sum(label, dim=1) + smooth loss = 1 - num / den return loss def binary_ce_dice_loss(pred, label, smooth=1.0, **kwargs): loss1 = binary_ce_loss(pred, label, **kwargs) loss2 = binary_dice_loss(pred, label, smooth=smooth) return loss1 + loss2 def _make_one_hot(label, num_classes): """ :param label: [N, *], values in [0,num_classes) :return: [N, C, *] """ label = label.unsqueeze(1) shape = list(label.shape) shape[1] = num_classes result = torch.zeros(shape, device=label.device) result.scatter_(1, label, 1) return result def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values.
""" if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_loss(pred_raw, label_raw, loss_func, weight=None, class_weight= None, class_weight_norm=False, reduction='mean', avg_factor=None, smooth=1.0, **kwargs): """ :param pred: [N, C, *] scores without softmax :param label: [N, *] in [0, C], 0 stands for background, 1~C stands for pred in 0~C-1 :return: reduction([N]) """ pred = pred_raw.clone() label = label_raw.clone() num_classes = pred.shape[1] if class_weight is not None: class_weight = class_weight.float() if pred.shape != label.shape: label = _make_one_hot(label, num_classes) pred = torch.sigmoid(pred) loss = 0.0 for i in range(num_classes): if isinstance(loss_func, tuple): loss_function = loss_func[i] else: loss_function = loss_func class_loss = loss_function(pred[:, i], label[:, i], smooth=smooth) if class_weight is not None: class_loss *= class_weight[i] loss += class_loss if class_weight is not None and class_weight_norm: loss = loss / torch.sum(class_weight) else: loss = loss / num_classes loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class BinaryLossNew(nn.Module): def __init__(self, loss_type='ce', reduction='mean', class_weight=None, class_weight_norm=False, loss_weight=1.0, smooth=1.0, **kwargs): super(BinaryLossNew, self).__init__() assert loss_type in ['ce', 'dice', 'cbce', 'ce_dice', 'mix'] self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.class_weight_norm = class_weight_norm self.loss_type = loss_type self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CVIU-CSU/M2MRF-Lesion-Segmentation
BinaryLoss
false
17085
[ "Apache-2.0" ]
10
13af87927f4cdeca70e35d570edd1aec43b387b6
https://github.com/CVIU-CSU/M2MRF-Lesion-Segmentation/tree/13af87927f4cdeca70e35d570edd1aec43b387b6
Conv2dZeros
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kj/ckjpajvnrzyvwmr6rrlk6z2m4aczgu2cgohe4zihb4lpwg3dlk4p.py # Topologically Sorted Source Nodes: [output, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp] # Source node to ATen node mapping: # exp => exp # mul => mul # mul_1 => mul_1 # output => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {}) triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
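# NOTE (editor's sketch): triton_poi_fused_convolution_exp_mul_0 fuses the
# convolution's bias add with the learned log-scale from the graph fragment,
#   out = (conv(x) + b) * exp(logs * 3),
# where 3 is the logscale_factor. An eager equivalent for these shapes
# (function name is illustrative):
import torch
import torch.nn.functional as F

def conv2d_zeros_reference(x, weight, bias, logs, logscale_factor=3):
    y = F.conv2d(x, weight, bias, stride=1, padding=1)  # extern conv + bias add
    return y * torch.exp(logs * logscale_factor)  # logs: (C, 1, 1), broadcasts over N, H, W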
import torch import torch.nn as nn import torch.utils.data def cpd_mean(tensor, dim=None, keepdims=False): if dim is None: return tensor.mean() else: if isinstance(dim, int): dim = [dim] dim = sorted(dim) for d in dim: tensor = tensor.mean(dim=d, keepdim=True) if not keepdims: for i, d in enumerate(dim): tensor.squeeze_(d - i) return tensor class Actnormlayer(nn.Module): def __init__(self, num_features, scale=1.0): super(Actnormlayer, self).__init__() self.register_buffer('is_initialized', torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.logs = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.num_features = num_features self.scale = float(scale) self.eps = 1e-06 def initialize_parameters(self, x): if not self.training: return with torch.no_grad(): bias = -cpd_mean(x.clone(), dim=[0, 2, 3], keepdims=True) v = cpd_mean((x.clone() + bias) ** 2, dim=[0, 2, 3], keepdims=True) logs = (self.scale / (v.sqrt() + self.eps)).log() self.bias.data.copy_(bias.data) self.logs.data.copy_(logs.data) self.is_initialized += 1.0 def _center(self, x, reverse=False): if reverse: return x - self.bias else: return x + self.bias def _scale(self, x, sldj, reverse=False): logs = self.logs if reverse: x = x * logs.mul(-1).exp() else: x = x * logs.exp() if sldj is not None: ldj = logs.sum() * x.size(2) * x.size(3) if reverse: sldj = sldj - ldj else: sldj = sldj + ldj return x, sldj def forward(self, x, ldj=None, reverse=False): if not self.is_initialized: self.initialize_parameters(x) if reverse: x, ldj = self._scale(x, ldj, reverse) x = self._center(x, reverse) else: x = self._center(x, reverse) x, ldj = self._scale(x, ldj, reverse) return x, ldj class Conv2d(nn.Conv2d): pad_dict = {'same': lambda kernel, stride: [(((k - 1) * s + 1) // 2) for k, s in zip(kernel, stride)], 'valid': lambda kernel, stride: [(0) for _ in kernel]} @staticmethod def get_padding(padding, kernel_size, stride): if isinstance(padding, str): if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if isinstance(stride, int): stride = [stride, stride] padding = padding.lower() try: padding = Conv2d.pad_dict[padding](kernel_size, stride) except KeyError: raise ValueError('{} is not supported'.format(padding)) return padding def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', do_actnorm=True, weight_std=0.05): padding = Conv2d.get_padding(padding, kernel_size, stride) super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias=not do_actnorm) self.weight.data.normal_(mean=0.0, std=weight_std) if not do_actnorm: self.bias.data.zero_() else: self.actnorm = Actnormlayer(out_channels) self.do_actnorm = do_actnorm def forward(self, input): x = super(Conv2d, self).forward(input) if self.do_actnorm: x, _ = self.actnorm(x, 0.0) return x class Conv2dZeros(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', logscale_factor=3): padding = Conv2d.get_padding(padding, kernel_size, stride) super(Conv2dZeros, self).__init__(in_channels, out_channels, kernel_size, stride, padding) self.logscale_factor = logscale_factor self.register_parameter('logs', nn.Parameter(torch.zeros( out_channels, 1, 1))) self.weight.data.zero_() self.bias.data.zero_() def forward(self, input): output = super(Conv2dZeros, self).forward(input) return output * torch.exp(self.logs * self.logscale_factor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], 
{'in_channels': 4, 'out_channels': 4}]
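# NOTE (editor's sketch): Conv2d.get_padding resolves 'same' padding as
# ((k - 1) * s + 1) // 2 per dimension, so kernel [3, 3] with stride [1, 1]
# yields [1, 1] -- exactly the padding=(1, 1) passed to the compiled
# convolution in the generated code.
assert Conv2d.get_padding('same', [3, 3], [1, 1]) == [1, 1]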
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_exp_mul_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 def cpd_mean(tensor, dim=None, keepdims=False): if dim is None: return tensor.mean() else: if isinstance(dim, int): dim = [dim] dim = sorted(dim) for d in dim: tensor = tensor.mean(dim=d, keepdim=True) if not keepdims: for i, d in enumerate(dim): tensor.squeeze_(d - i) return tensor class Actnormlayer(nn.Module): def __init__(self, num_features, scale=1.0): super(Actnormlayer, self).__init__() self.register_buffer('is_initialized', torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.logs = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.num_features = num_features self.scale = float(scale) self.eps = 1e-06 def initialize_parameters(self, x): if not self.training: return with torch.no_grad(): bias = -cpd_mean(x.clone(), dim=[0, 2, 3], keepdims=True) v = cpd_mean((x.clone() + bias) ** 2, dim=[0, 2, 3], keepdims=True) logs = (self.scale / (v.sqrt() + self.eps)).log() self.bias.data.copy_(bias.data) self.logs.data.copy_(logs.data) self.is_initialized += 1.0 def _center(self, x, reverse=False): if reverse: return x - self.bias else: return x + self.bias def _scale(self, x, sldj, reverse=False): logs = self.logs if reverse: x = x * logs.mul(-1).exp() else: x = x * logs.exp() if sldj is not None: ldj = logs.sum() * x.size(2) * x.size(3) if reverse: sldj = sldj - ldj else: sldj = sldj + ldj return x, sldj def forward(self, x, ldj=None, reverse=False): if not self.is_initialized: self.initialize_parameters(x) if reverse: x, ldj = self._scale(x, ldj, reverse) x = self._center(x, reverse) else: x = self._center(x, reverse) 
x, ldj = self._scale(x, ldj, reverse) return x, ldj class Conv2d(nn.Conv2d): pad_dict = {'same': lambda kernel, stride: [(((k - 1) * s + 1) // 2) for k, s in zip(kernel, stride)], 'valid': lambda kernel, stride: [(0) for _ in kernel]} @staticmethod def get_padding(padding, kernel_size, stride): if isinstance(padding, str): if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if isinstance(stride, int): stride = [stride, stride] padding = padding.lower() try: padding = Conv2d.pad_dict[padding](kernel_size, stride) except KeyError: raise ValueError('{} is not supported'.format(padding)) return padding def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', do_actnorm=True, weight_std=0.05): padding = Conv2d.get_padding(padding, kernel_size, stride) super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias=not do_actnorm) self.weight.data.normal_(mean=0.0, std=weight_std) if not do_actnorm: self.bias.data.zero_() else: self.actnorm = Actnormlayer(out_channels) self.do_actnorm = do_actnorm def forward(self, input): x = super(Conv2d, self).forward(input) if self.do_actnorm: x, _ = self.actnorm(x, 0.0) return x class Conv2dZerosNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', logscale_factor=3): padding = Conv2d.get_padding(padding, kernel_size, stride) super(Conv2dZerosNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding) self.logscale_factor = logscale_factor self.register_parameter('logs', nn.Parameter(torch.zeros( out_channels, 1, 1))) self.weight.data.zero_() self.bias.data.zero_() def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_4 = self.logs primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
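# NOTE (editor's sketch, assumes a CUDA device): Conv2dZeros zero-initializes
# weight and bias, so randomize the eager module's parameters before comparing
# to make the eager-vs-compiled equivalence check non-trivial.
import torch
m_eager = Conv2dZeros(4, 4).cuda()
with torch.no_grad():
    for p in m_eager.parameters():
        p.normal_()
m_fused = Conv2dZerosNew(4, 4).cuda()
m_fused.load_state_dict(m_eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m_eager(x), m_fused(x), atol=1e-05)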
Catherine0505/mar-scf-flow
Conv2dZeros
false
17086
[ "Apache-2.0" ]
10
aa7c3564cb9f2967c5e580a633516dba1b597f98
https://github.com/Catherine0505/mar-scf-flow/tree/aa7c3564cb9f2967c5e580a633516dba1b597f98
Multi_Head_Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/m4/cm4phghqj5w2sk63nw5o42zp35vrpy756culirf4hdhqb2konszt.py # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_2 => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/em/cemrzefmonv7kluqrhv6urbabflvt2tncfknrqz4dgzv35jr4rjs.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_2 => add # out_3 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xv/cxv72lw46eisa3ys22lozw4a4nobogioiypvuchke4culwgfgkoq.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_2 => add # out_3 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_11), kwargs = {}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, 
out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex % 16 x4 = (xindex // 4) x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x5), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf8 = 
empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(buf6, primals_1, buf7, buf8, 16, grid=grid(16), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(buf6, primals_1, buf7, buf8, primals_10, primals_11, buf9, 64, grid=grid(64), stream=stream0) del buf7 del buf8 del primals_11 return (buf9, primals_1, primals_10, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, primals_8, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
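A minimal eager-mode sketch (hypothetical, not part of the generated dump) of why triton_poi_fused__softmax_0 above degenerates to constant ones: the bmm produces (16, 1, 1) scores, and softmax over a size-1 axis is exp(x - x) / exp(x - x) == 1, which is exactly the `tmp3 = tmp2 - tmp2` and `tmp6 = tmp5 / tmp5` sequence in the kernel.

import torch

# Hypothetical shapes taken from buf3 above: 16 head-rows, sequence length 1.
scores = torch.randn(16, 1, 1)
probs = torch.softmax(scores, dim=-1)
# Softmax over a single element is identically 1, so the fused kernel's
# subtract-max / exp / normalize pipeline folds to x - x and e^0 / e^0.
assert torch.allclose(probs, torch.ones_like(probs))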
import torch import torch.nn as nn import torch.nn.functional as F class Scaled_Dot_Product_Attention(nn.Module): """Scaled Dot-Product Attention """ def __init__(self): super(Scaled_Dot_Product_Attention, self).__init__() def forward(self, Q, K, V, scale=None): """ Args: Q: [batch_size, len_Q, dim_Q] K: [batch_size, len_K, dim_K] V: [batch_size, len_V, dim_V] scale: scaling factor, typically dim_K ** -0.5 Return: the context tensor after self-attention """ attention = torch.matmul(Q, K.permute(0, 2, 1)) if scale: attention = attention * scale attention = F.softmax(attention, dim=-1) context = torch.matmul(attention, V) return context class Multi_Head_Attention(nn.Module): def __init__(self, dim_model, num_head, dropout=0.0): super(Multi_Head_Attention, self).__init__() self.num_head = num_head assert dim_model % num_head == 0 self.dim_head = dim_model // self.num_head self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head) self.fc_K = nn.Linear(dim_model, num_head * self.dim_head) self.fc_V = nn.Linear(dim_model, num_head * self.dim_head) self.attention = Scaled_Dot_Product_Attention() self.fc = nn.Linear(num_head * self.dim_head, dim_model) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(dim_model) def forward(self, x): batch_size = x.size(0) Q = self.fc_Q(x) K = self.fc_K(x) V = self.fc_V(x) Q = Q.view(batch_size * self.num_head, -1, self.dim_head) K = K.view(batch_size * self.num_head, -1, self.dim_head) V = V.view(batch_size * self.num_head, -1, self.dim_head) scale = K.size(-1) ** -0.5 context = self.attention(Q, K, V, scale) context = context.view(batch_size, -1, self.dim_head * self.num_head) out = self.fc(context) out = self.dropout(out) out = out + x out = self.layer_norm(out) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim_model': 4, 'num_head': 4}]
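A short usage sketch (hypothetical; it assumes the class definitions above are in scope) wiring the module to the shapes returned by get_init_inputs() and get_inputs():

import torch

mha = Multi_Head_Attention(dim_model=4, num_head=4)   # from get_init_inputs()
x = torch.rand(4, 4)                                  # from get_inputs()
out = mha(x)
# The residual `out + x` broadcasts (4, 1, 4) against (4, 4), so the
# layer-normalized result is (4, 4, 4) for this 2-D input.
print(out.shape)  # torch.Size([4, 4, 4])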
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x5, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3) buf4 = buf3 del buf3 get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1, buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1, buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf7 del buf8 del primals_11 return buf9, primals_1, primals_10, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, primals_8, reinterpret_tensor(buf2, (16, 1, 1 ), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0 ), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0) class Scaled_Dot_Product_Attention(nn.Module): """Scaled Dot-Product Attention """ def __init__(self): super(Scaled_Dot_Product_Attention, self).__init__() def forward(self, Q, K, V, scale=None): """ Args: Q: [batch_size, len_Q, dim_Q] K: [batch_size, len_K, dim_K] V: [batch_size, len_V, dim_V] scale: scaling factor, typically dim_K ** -0.5 Return: the context tensor after self-attention """ attention = torch.matmul(Q, K.permute(0, 2, 1)) if scale: attention = attention * scale attention = F.softmax(attention, dim=-1) context = torch.matmul(attention, V) return context class Multi_Head_AttentionNew(nn.Module): def __init__(self, dim_model, num_head, dropout=0.0): super(Multi_Head_AttentionNew, self).__init__() self.num_head = num_head assert dim_model % num_head == 0 self.dim_head = dim_model // self.num_head self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head) self.fc_K = nn.Linear(dim_model, num_head * self.dim_head) self.fc_V = nn.Linear(dim_model, num_head * self.dim_head) self.attention =
Scaled_Dot_Product_Attention() self.fc = nn.Linear(num_head * self.dim_head, dim_model) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(dim_model) def forward(self, input_0): primals_1 = input_0 primals_2 = self.fc_Q.weight primals_3 = self.fc_Q.bias primals_4 = self.fc_K.weight primals_5 = self.fc_K.bias primals_6 = self.fc_V.weight primals_7 = self.fc_V.bias primals_8 = self.fc.weight primals_9 = self.fc.bias primals_10 = self.layer_norm.weight primals_11 = self.layer_norm.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
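A hedged sanity check (a sketch assuming a CUDA device and the definitions above; not part of the original dump) that the compiled wrapper matches the eager module once both share parameters. Note the primals mapping in forward() above has been corrected to match call(), which uses primals_1 as the activation and primals_8 as the output projection weight.

import torch

torch.manual_seed(0)
eager = Multi_Head_Attention(dim_model=4, num_head=4).cuda().eval()
fused = Multi_Head_AttentionNew(dim_model=4, num_head=4).cuda().eval()
fused.load_state_dict(eager.state_dict())   # identical submodules, so keys match
x = torch.rand(4, 4, device='cuda')
# With dropout=0.0 the two paths should agree to float32 tolerance.
assert torch.allclose(eager(x), fused(x), atol=1e-5)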
Ch4ndelier/Transformer_Zero_Velocity_classification
Multi_Head_Attention
false
17,087
[ "MIT" ]
6
857efb66189c503e983c11bd7dde16ad19c51ada
https://github.com/Ch4ndelier/Transformer_Zero_Velocity_classification/tree/857efb66189c503e983c11bd7dde16ad19c51ada
FCNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ry/cryl4mlcbqnhycqy2ocfprfhifkeb6c7ygxkpoyn4aojx77wfb5q.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
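Restated in eager PyTorch, the persistent reduction above is just the dim=None weight-norm reparameterization w = g * v / ||v||; a sketch with the shapes from the asserts (hypothetical variable names):

import torch

g = torch.rand(())      # primals_1: the scalar weight_g
v = torch.rand(4, 4)    # primals_2: the direction tensor weight_v
norm = v.pow(2).sum().sqrt()   # tmp5: Frobenius norm over the whole matrix
w = v * (g / norm)             # tmp9: effective weight fed to addmm
# Equivalent to nn.utils.weight_norm(nn.Linear(4, 4), dim=None) resolving
# its .weight from weight_g and weight_v.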
import torch import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm import torch.nn.modules.module class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'out_size': 4}]
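A brief usage sketch (hypothetical; assumes the FCNet definition above) with the shapes from get_inputs():

import torch

fc = FCNet(in_size=4, out_size=4, activate='relu')
x = torch.rand(4, 4, 4, 4)   # from get_inputs()
y = fc(x)                    # nn.Linear acts on the last dimension
print(y.shape)               # torch.Size([4, 4, 4, 4])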
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm import torch.nn.modules.module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0) class FCNetNew(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNetNew, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, input_0): primals_3 = self.lin.bias primals_1 = self.lin.weight_g primals_2 = self.lin.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
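As with the previous module, a hedged equivalence sketch (assumes a CUDA device and the FCNet/FCNetNew definitions above):

import torch

eager = FCNet(in_size=4, out_size=4).cuda()
fused = FCNetNew(in_size=4, out_size=4).cuda()
fused.load_state_dict(eager.state_dict())   # weight_g / weight_v / bias line up
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)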
ChCh1999/RTPB
FCNet
false
17,088
[ "MIT" ]
8
1066a3bfe4fe1b41eff74fd152936880302a60a2
https://github.com/ChCh1999/RTPB/tree/1066a3bfe4fe1b41eff74fd152936880302a60a2
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ag/caggqh3nrqirp3azykpm6daivhojwggbhob6pnubsheittrjeiky.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qw/cqw2ahskucrmwkbadqswj76db4kfcxn5w76kji7peia7kwu4zbkq.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/sn/csn7adwcrtimy326lupk2anpklflbjwcebi4icerax26g54j67hv.py # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # demod => rsqrt # mul_2 => mul_2 # pow_1 => pow_1 # sum_1 => sum_1 # weight => mul_3 # weight_1 => mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 
buf3 = buf0; del buf0 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return (reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
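The fused rsqrt reduction above is the StyleGAN2-style demodulation step; an eager restatement under the shapes asserted in call() (hypothetical names):

import torch

batch, out_c, in_c, k = 4, 4, 4, 4
scale = 1 / (in_c * k * k) ** 0.5            # 0.125, the constant in the kernel
base = torch.randn(1, out_c, in_c, k, k)     # primals_5: the shared weight
style = torch.rand(batch, 1, in_c, 1, 1)     # per-sample modulation from EqualLinear
w = scale * base * style                                  # tmp4
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)      # tmp12
w = w * demod.view(batch, out_c, 1, 1, 1)                 # tmp13 -> buf5
print(w.shape)  # torch.Size([4, 4, 4, 4, 4])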
from torch.autograd import Function import math import torch from torch.nn import functional as F from torch import nn def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def downsample(images, size=256): if images.shape[2] > size: factor = images.shape[2] // size assert factor * size == images.shape[2] images = images.view([-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = images.mean(dim=[3, 5]) return images else: assert images.shape[-1] == 256 return images def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, 
self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.reshape(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
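A short usage sketch (hypothetical; assumes the definitions above) matching get_inputs() and get_init_inputs(); note the even kernel_size=4 with padding=2 grows a 4x4 input to 5x5:

import torch

conv = ModulatedConv2d(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
x = torch.rand(4, 4, 4, 4)      # input feature maps
style = torch.rand(4, 4)        # per-sample style vectors
out = conv(x, style)            # grouped conv with per-sample modulated weights
print(out.shape)                # torch.Size([4, 4, 5, 5])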
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch.nn import functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 
1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def downsample(images, size=256): if images.shape[2] > size: factor = images.shape[2] // size assert factor * size == images.shape[2] images = images.view([-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = images.mean(dim=[3, 5]) return images else: assert images.shape[-1] == 256 return images def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1.0, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, 
self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_3 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
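This record depends on the StyleGAN2 CUDA extensions (upfirdn2d_op, fused), which are not included in the snippet, but the pure-PyTorch helpers are self-contained. As a quick check of make_kernel, which builds the normalized separable blur kernel used by Blur, here is a minimal sketch (the helper is repeated so the block runs standalone):

import torch

def make_kernel(k):
    # Same helper as in the record above: outer-product a 1D tap vector
    # into a 2D kernel, then normalize it to sum to 1.
    k = torch.tensor(k, dtype=torch.float32)
    if k.ndim == 1:
        k = k[None, :] * k[:, None]
    k /= k.sum()
    return k

blur = make_kernel([1, 3, 3, 1])  # the default blur_kernel of ModulatedConv2d
assert blur.shape == (4, 4)
assert torch.isclose(blur.sum(), torch.tensor(1.0))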
BillyXYB/TransEditor
ModulatedConv2d
false
17,089
[ "MIT" ]
4
0194cd6f0e96c801d55c0cb9683e1f552bcf6d48
https://github.com/BillyXYB/TransEditor/tree/0194cd6f0e96c801d55c0cb9683e1f552bcf6d48
Position_wise_Feed_Forward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/f6/cf6g5vjl6clpvfa2j7jw5adg3xchgkyal7cg5smxzk57hjhn3cgo.py # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_4 => add # out_5 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * 
tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/em/cem6vr7kjs5yrruf5v4ykxzmxb7usf5y77j2nupb2ytwzcqkihrt.py # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_4 => add # out_5 => add_1, add_2, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(buf2, primals_3, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(buf2, primals_3, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0) del buf3 del buf4 del primals_7 return (buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
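The first kernel above fuses the bias add, the ReLU, and the boolean (out <= 0) mask that aten.threshold_backward needs in the backward pass. In eager terms the stored mask is just the complement of the ReLU's support; a small sanity sketch:

import torch

x = torch.randn(6)
out = torch.relu(x)
mask = out <= 0  # what the fused kernel writes to the boolean buffer
assert torch.equal(mask, x <= 0)  # ReLU output is zero exactly where x <= 0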
import torch
import torch.nn as nn
import torch.nn.functional as F


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'hidden': 4}]
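A minimal smoke test for the eager module, matching the get_inputs() and get_init_inputs() shapes above:

import torch

ffn = Position_wise_Feed_Forward(dim_model=4, hidden=4)
x = torch.rand(4, 4, 4, 4)
y = ffn(x)
assert y.shape == x.shape  # the residual add and LayerNorm preserve the shape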
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_3,
            buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2,
            primals_3, buf3, buf4, primals_6, primals_7, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        del buf4
        del primals_7
    return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4),
        (4, 1), 0), buf2, primals_4, buf6


class Position_wise_Feed_ForwardNew(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_ForwardNew, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.layer_norm.weight
        primals_7 = self.layer_norm.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
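Since dropout defaults to 0.0, both the eager module and the compiled wrapper are deterministic and their outputs can be compared directly. A sketch of such a parity check, assuming a CUDA device (the Triton kernels require one):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Position_wise_Feed_Forward(4, 4).cuda()
    fused = Position_wise_Feed_ForwardNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # identical parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)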
Ch4ndelier/Transformer_Zero_Velocity_classification
Position_wise_Feed_Forward
false
17,090
[ "MIT" ]
6
857efb66189c503e983c11bd7dde16ad19c51ada
https://github.com/Ch4ndelier/Transformer_Zero_Velocity_classification/tree/857efb66189c503e983c11bd7dde16ad19c51ada
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/3r/c3runacu4pkgvdlmsngxxodg4pf6xmzvzxpf7xzbkjc3ay27rdj3.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 4), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qy/cqykzoe7kdcow5fbdv734d5wts7e36niwxyam2qtol2mvctkisk3.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add, clone, rsqrt, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/52/c52aswybwyns3xog32uie3enm3x65oo7iirbemg7s7gts66rbmvu.py # Topologically Sorted Source 
Nodes: [x_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2c/c2chr2335ys4k6vbu4o5ll5sgi27jdfp6ruu6btb7bafva3vmlxy.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_4 => add_2, erf, mul_2, mul_3, mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_2), kwargs = {}) triton_poi_fused_gelu_3 = async_compile.triton('triton_poi_fused_gelu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/k2/ck22mqjue6rud6ftosccvxwcmirpose7lxy3phkvz7pzex7vcotj.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add] # Source node to ATen node mapping: # x_8 => add_3 # Graph fragment: # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_3), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics 
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] 
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0) del buf2 del buf3 del primals_5 buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] triton_poi_fused_gelu_3.run(buf5, buf6, 1024, grid=grid(1024), stream=stream0) buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7) del primals_9 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add] triton_poi_fused_add_4.run(primals_1, primals_10, buf7, buf8, 16, 16, grid=grid(16, 16), stream=stream0) return (buf8, primals_1, primals_2, primals_4, primals_10, buf1, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
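triton_poi_fused_gelu_3 above implements the exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))), with 0.7071067811865476 = 1/sqrt(2) baked in as a constant. A quick CPU cross-check against PyTorch's default GELU:

import torch
import torch.nn.functional as F

x = torch.randn(8)
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
assert torch.allclose(manual, F.gelu(x), atol=1e-6)  # F.gelu defaults to the erf form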
import torch
import torch.nn.functional as F
from torch import nn


class LayerNorm(nn.Module):
    """ LayerNorm that supports two data formats: channels_last (default) or
    channels_first, referring to the ordering of the dimensions in the inputs:
    channels_last corresponds to inputs with shape
    (batch_size, height, width, channels) while channels_first corresponds to
    inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ['channels_last', 'channels_first']:
            raise NotImplementedError
        self.normalized_shape = normalized_shape,

    def forward(self, x):
        if self.data_format == 'channels_last':
            return F.layer_norm(x, self.normalized_shape, self.weight,
                self.bias, self.eps)
        elif self.data_format == 'channels_first':
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x


class Block(nn.Module):
    """ ConvNeXt Block. There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv;
        all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) ->
        Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch.

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """

    def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = LayerNorm(dim, eps=1e-06)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
            requires_grad=True) if layer_scale_init_value > 0 else None
        # NOTE: DropPath is referenced here but neither defined nor imported
        # in this snippet; with the default drop_path=0.0 it is never
        # instantiated, so the module runs as-is.
        self.drop_path = DropPath(drop_path
            ) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)
        x = input + self.drop_path(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
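Block references DropPath (stochastic depth), which is missing from this snippet. For completeness, a minimal sketch of the usual timm-style formulation; this is an assumption, not the upstream implementation the repository actually imports:

import torch
import torch.nn as nn


class DropPath(nn.Module):
    """Stochastic depth: drop whole residual branches per sample.

    Minimal sketch of the common timm-style formulation; hypothetical
    stand-in for the dependency missing from this snippet.
    """

    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast across all other dims,
        # rescaled so the expected activation is unchanged.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = torch.rand(shape, dtype=x.dtype, device=x.device) < keep_prob
        return x * mask / keep_prob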
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(64, 4)](buf1, buf2, buf3, primals_4, primals_5, buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf2 del buf3 del primals_5 buf5 = empty_strided_cuda((64, 
16), (16, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_gelu_3[grid(1024)](buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7) del primals_9 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_4[grid(16, 16)](primals_1, primals_10, buf7, buf8, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) return (buf8, primals_1, primals_2, primals_4, primals_10, buf1, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8, primals_6) class LayerNorm(nn.Module): """ LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last' ): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ['channels_last', 'channels_first']: raise NotImplementedError self.normalized_shape = normalized_shape, def forward(self, x): if self.data_format == 'channels_last': return F.layer_norm(x, self.normalized_shape, self.weight, self .bias, self.eps) elif self.data_format == 'channels_first': u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class BlockNew(nn.Module): """ ConvNeXt Block. There are two equivalent implementations: (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back We use (2) as we find it slightly faster in PyTorch Args: dim (int): Number of input channels. drop_path (float): Stochastic depth rate. Default: 0.0 layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. 
""" def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) self.norm = LayerNorm(dim, eps=1e-06) self.pwconv1 = nn.Linear(dim, 4 * dim) self.act = nn.GELU() self.pwconv2 = nn.Linear(4 * dim, dim) self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True) if layer_scale_init_value > 0 else None self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() def forward(self, input_0): primals_3 = self.gamma primals_2 = self.dwconv.weight primals_4 = self.dwconv.bias primals_5 = self.norm.weight primals_9 = self.norm.bias primals_6 = self.pwconv1.weight primals_7 = self.pwconv1.bias primals_8 = self.pwconv2.weight primals_10 = self.pwconv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
CarnoZhao/mmdetection
Block
false
17,091
[ "Apache-2.0" ]
10
b85eaffdf1af28eaffcc2263110a059237cf5b23
https://github.com/CarnoZhao/mmdetection/tree/b85eaffdf1af28eaffcc2263110a059237cf5b23
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/iy/ciycn2wsaxhacx4ccrdp7xbsshpfpl5n6mimevpioo33m4s2rqa2.py # Topologically Sorted Source Nodes: [sub, sub_1, add, audio, mul, sub_2], Original ATen: [aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # audio => div # mul => mul # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %unsqueeze_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {}) triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': 
True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.minimum(tmp1, tmp2) tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = triton_helpers.maximum(tmp1, tmp2) tmp10 = triton_helpers.maximum(tmp9, tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = tmp11 - tmp7 tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp8 / tmp14 tmp16 = 2.0 tmp17 = tmp15 * tmp16 tmp18 = 1.0 tmp19 = tmp17 - tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, sub_1, add, audio, mul, sub_2], Original ATen: [aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
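For orientation (a hedged restatement, not part of the generated file): the kernel above flattens the 4x4 input to 16 elements, recovers the row index as x1 = xindex // 4, and, because the row length is the compile-time constant 4, computes the row minimum (tmp7) and maximum (tmp11) inline with chained minimum/maximum ops rather than launching separate reduction kernels. An eager-mode equivalent of the kernel body, assuming a generic 2D float tensor x:

import torch

# Eager restatement of triton_poi_fused_add_div_mul_sub_0:
# per-row min-max scaling into [-1, 1] with a 1e-10 epsilon.
x = torch.rand(4, 4)
row_min = x.min(dim=1, keepdim=True)[0]   # tmp7 in the kernel
row_max = x.max(dim=1, keepdim=True)[0]   # tmp11 in the kernel
out = (x - row_min) / (row_max - row_min + 1e-10) * 2 - 1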
import torch import torch.nn as nn class Normalize(nn.Module): """ Scale Audio to be between -1 and 1 """ def __init__(self): super(Normalize, self).__init__() def forward(self, audio: 'torch.Tensor'): if len(audio.shape) != 2: raise ValueError('Audio should be 2D: [batch_size X audio_length]') if audio.shape[1] < 1: raise ValueError('Audio length is zero') max_value = torch.max(audio, dim=1)[0].detach() min_value = torch.min(audio, dim=1)[0].detach() max_value = torch.unsqueeze(max_value, 1) min_value = torch.unsqueeze(min_value, 1) audio = (audio - min_value) / (max_value - min_value + 1e-10) return audio * 2 - 1 def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.minimum(tmp1, tmp2) tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = triton_helpers.maximum(tmp1, tmp2) tmp10 = triton_helpers.maximum(tmp9, tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = tmp11 - tmp7 tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp8 / tmp14 tmp16 = 2.0 tmp17 = tmp15 * tmp16 tmp18 = 1.0 tmp19 = tmp17 - tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): """ Scale Audio to be between -1 and 1 """ def __init__(self): super(NormalizeNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
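A minimal usage sketch (hypothetical inputs, not from the original repo) showing the module's contract on concrete numbers: each row of a [batch_size x audio_length] tensor is rescaled so its minimum maps to -1 and its maximum to 1.

import torch

audio = torch.tensor([[0.0, 1.0, 2.0, 4.0]])
out = Normalize()(audio)
# min 0 -> -1, value 1 -> -0.5, midpoint 2 -> 0, max 4 -> 1
expected = torch.tensor([[-1.0, -0.5, 0.0, 1.0]])
assert torch.allclose(out, expected, atol=1e-6)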
CiscoDevNet/vo-id
Normalize
false
17,092
[ "MIT" ]
7
9a01f866c7539a9cd095d9627ba4f65ad540ea6b
https://github.com/CiscoDevNet/vo-id/tree/9a01f866c7539a9cd095d9627ba4f65ad540ea6b
WNConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/tm/ctmx5yf3inqayj2kcfkajsklppap45mn4o5umm3r6qbzigmnbzp3.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yh/cyhtvbmvd4ixhoxvv4hb6hnp7dco6t5iqvu2yj4c7hf4ejmbx4sb.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 81) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] stream0 = get_raw_stream(0) triton_per_fused__weight_norm_interface_0.run(buf1, primals_2, primals_1, buf2, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf4, primals_3, 1296, grid=grid(1296), stream=stream0) del primals_3 return (buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
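A quick shape check (worked arithmetic for orientation): with a 4x4x4x4 input, kernel_size 4 and padding 4, the convolution output spatial size is 4 + 2*4 - 4 + 1 = 9, so buf3 is (4, 4, 9, 9); the bias-add kernel therefore runs over xnumel = 4*4*9*9 = 1296 elements and recovers the channel index as x1 = (xindex // 81) % 4, where 81 = 9*9 is one channel's spatial extent.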
import torch import torch.nn as nn import torch.utils.data class WNConv2d(nn.Module): """Weight-normalized 2d convolution. Args: in_channels (int): Number of channels in the input. out_channels (int): Number of channels in the output. kernel_size (int): Side length of each convolutional kernel. padding (int): Padding to add on edges of input. bias (bool): Use bias in the convolution operation. """ def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True): super(WNConv2d, self).__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)) def forward(self, x): x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'padding': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2, primals_1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(1296)](buf4, primals_3, 1296, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2 class WNConv2dNew(nn.Module): """Weight-normalized 2d convolution. Args: in_channels (int): Number of channels in the input. out_channels (int): Number of channels in the output. kernel_size (int): Side length of each convolutional kernel. padding (int): Padding to add on edges of input. bias (bool): Use bias in the convolution operation. 
""" def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True): super(WNConv2dNew, self).__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)) def forward(self, input_0): primals_3 = self.conv.bias primals_1 = self.conv.weight_g primals_2 = self.conv.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Catherine0505/mar-scf-flow
WNConv2d
false
17,093
[ "Apache-2.0" ]
10
aa7c3564cb9f2967c5e580a633516dba1b597f98
https://github.com/Catherine0505/mar-scf-flow/tree/aa7c3564cb9f2967c5e580a633516dba1b597f98
GeLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/em/cemsldl7gztewz6taszipz2hlq6opzg5r7czurt7p4quyofjctdo.py # Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.7978845608), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.044715), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7978845608 tmp4 = tmp0 * tmp3 tmp5 = 0.044715 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp0 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tmp11 = libdevice.tanh(tmp10) tmp12 = tmp11 + tmp8 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
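For reference, the constant 0.7978845608 is sqrt(2/pi), so the fused expression computes 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x^2))), the standard tanh approximation of GELU; note how the generated kernel reuses tmp8 = 1.0 for both the inner polynomial (tmp9) and the outer 1 + tanh(...) term (tmp12).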
import torch import torch.nn as nn class GeLU(nn.Module): def forward(self, x): return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7978845608 tmp4 = tmp0 * tmp3 tmp5 = 0.044715 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp0 tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tmp11 = libdevice.tanh(tmp10) tmp12 = tmp11 + tmp8 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GeLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
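A short sketch (hypothetical check, assuming PyTorch >= 1.12 for the approximate argument) comparing the module against PyTorch's built-in tanh-approximated GELU:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
# GeLU above matches F.gelu's tanh approximation up to rounding
# in the hard-coded sqrt(2/pi) constant.
assert torch.allclose(GeLU()(x), F.gelu(x, approximate='tanh'), atol=1e-6)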
Chriskuei/FedMatch
GeLU
false
17,094
[ "Apache-2.0" ]
4
305e8c4bbb398712b00c883a986dfec17b500f76
https://github.com/Chriskuei/FedMatch/tree/305e8c4bbb398712b00c883a986dfec17b500f76
ApplySingleAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ry/cryl4mlcbqnhycqy2ocfprfhifkeb6c7ygxkpoyn4aojx77wfb5q.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/co/ccou4wduxtrcy64fzego4larxydshqtohbkuafuca7ytwsykrulg.py # Topologically Sorted Source Nodes: [v__1], Original ATen: [aten.clone] # Source node to ATen node mapping: # v__1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/zi/czibctcia627qmko4n6pcuoykdtfvbksmnolcbi7aspd7lvl63ix.py # Topologically Sorted Source Nodes: [v__1], Original ATen: [aten.clone] # Source node to ATen node mapping: # v__1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fr/cfrw3mc5mi2jxtwxtrvl2sr6c6o24bpwipa5eylls2nvluwdr6r3.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (), ()) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (), ()) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten.norm, aten.div, aten.mul] triton_per_fused_div_mul_norm_0.run(buf5, primals_6, primals_5, buf6, 1, 16, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [v__1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf3, primals_3, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [v__1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(primals_9, buf9, 256, grid=grid(256), stream=stream0) del primals_9 buf10 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [v__1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [h_], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf7, primals_7, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [h_], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((), (), torch.float32) buf14 = buf13; del buf13 # reuse buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten.norm, aten.div, aten.mul] triton_per_fused_div_mul_norm_0.run(buf14, primals_11, primals_10, buf15, 1, 16, grid=grid(1), stream=stream0) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_12 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf7, primals_7, buf17, 64, grid=grid(64), stream=stream0) del buf7 del primals_7 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf3, primals_3, buf18, 64, grid=grid(64), stream=stream0) del buf3 del primals_3 return (reinterpret_tensor(buf16, (4, 1, 4), (4, 4, 1), 0), buf2, buf6, buf15, primals_1, primals_2, primals_5, primals_6, primals_10, primals_11, buf1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf15, reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), buf17, buf18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
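For orientation (a hedged walkthrough, not part of the generated file): the two 4D matmuls of the eager module are lowered to bmm over 16 = batch * mid_features flattened batches. buf10 = bmm((16, 1, 4), (16, 4, 4)) applies the attention map to the projected v, and buf12 = bmm((16, 1, 4), (16, 4, 1)) contracts against the projected q, leaving one scalar per (batch, mid) pair; buf12 is then viewed as (4, 4) for the final addmm through the third weight-normalized projection.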
import torch import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm import torch.nn.modules.module class FCNet(nn.Module): def __init__(self, in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class ApplySingleAttention(nn.Module): def __init__(self, v_features, q_features, mid_features, drop=0.0): super(ApplySingleAttention, self).__init__() self.lin_v = FCNet(v_features, mid_features, activate='relu', drop=drop ) self.lin_q = FCNet(q_features, mid_features, activate='relu', drop=drop ) self.lin_atten = FCNet(mid_features, mid_features, drop=drop) def forward(self, v, q, atten): """ v = batch, num_obj, dim q = batch, que_len, dim atten: batch x v_num x q_num """ v_ = self.lin_v(v).transpose(1, 2).unsqueeze(2) q_ = self.lin_q(q).transpose(1, 2).unsqueeze(3) v_ = torch.matmul(v_, atten.unsqueeze(1)) h_ = torch.matmul(v_, q_) h_ = h_.squeeze(3).squeeze(2) atten_h = self.lin_atten(h_.unsqueeze(1)) return atten_h def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn from torch.nn.utils import weight_norm import torch.nn.modules.module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (), ()) assert_size_stride(primals_6, (4, 4), (4, 1)) 
assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (), ()) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_3, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](primals_9, buf9, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_1[grid(16, 4)](buf7, primals_7, buf11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((), (), torch.float32) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_div_mul_norm_0[grid(1)](buf14, primals_11, primals_10, buf15, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf16) del primals_12 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf7, primals_7, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf7 del primals_7 buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf3, primals_3, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_3 return (reinterpret_tensor(buf16, (4, 1, 4), (4, 4, 1), 0), buf2, buf6, buf15, primals_1, primals_2, primals_5, primals_6, primals_10, primals_11, buf1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf15, reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), buf17, buf18) class FCNet(nn.Module): def __init__(self, 
in_size, out_size, activate=None, drop=0.0): super(FCNet, self).__init__() self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None) self.drop_value = drop self.drop = nn.Dropout(drop) self.activate = activate.lower() if activate is not None else None if activate == 'relu': self.ac_fn = nn.ReLU() elif activate == 'sigmoid': self.ac_fn = nn.Sigmoid() elif activate == 'tanh': self.ac_fn = nn.Tanh() def forward(self, x): if self.drop_value > 0: x = self.drop(x) x = self.lin(x) if self.activate is not None: x = self.ac_fn(x) return x class ApplySingleAttentionNew(nn.Module): def __init__(self, v_features, q_features, mid_features, drop=0.0): super(ApplySingleAttentionNew, self).__init__() self.lin_v = FCNet(v_features, mid_features, activate='relu', drop=drop ) self.lin_q = FCNet(q_features, mid_features, activate='relu', drop=drop ) self.lin_atten = FCNet(mid_features, mid_features, drop=drop) def forward(self, input_0, input_1, input_2): primals_3 = self.lin_v.lin.bias primals_1 = self.lin_v.lin.weight_g primals_2 = self.lin_v.lin.weight_v primals_7 = self.lin_q.lin.bias primals_5 = self.lin_q.lin.weight_g primals_6 = self.lin_q.lin.weight_v primals_12 = self.lin_atten.lin.bias primals_10 = self.lin_atten.lin.weight_g primals_11 = self.lin_atten.lin.weight_v primals_4 = input_0 primals_8 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
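A minimal smoke test (hypothetical shapes, eager module on CPU; the Triton-backed ApplySingleAttentionNew additionally requires CUDA) illustrating the documented contract v: [batch, num_obj, dim], q: [batch, que_len, dim], atten: [batch, v_num, q_num]:

import torch

batch, num_obj, que_len, dim = 4, 4, 4, 4
v = torch.rand(batch, num_obj, dim)
q = torch.rand(batch, que_len, dim)
atten = torch.rand(batch, num_obj, que_len)
module = ApplySingleAttention(v_features=dim, q_features=dim, mid_features=dim)
out = module(v, q, atten)
assert out.shape == (batch, 1, dim)  # one attended vector per example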
ChCh1999/RTPB
ApplySingleAttention
false
17,095
[ "MIT" ]
8
1066a3bfe4fe1b41eff74fd152936880302a60a2
https://github.com/ChCh1999/RTPB/tree/1066a3bfe4fe1b41eff74fd152936880302a60a2
BothContextGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_tensor => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/r6/cr6pdrpsjrizy2emadm3elrnad3xvoc6ejngp3sx6gbksgyalpru.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3p/c3pf6dt3zjvt4f54gg5ydgcf2etvvyrydqq473h2gccvvmljtaet.py # Topologically Sorted Source Nodes: [z, sub, mul, mul_1, add, tanh], Original ATen: [aten.sigmoid, aten.rsub, aten.mul, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # sub => sub # tanh => tanh # z => 
sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %addmm_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %addmm_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = tmp1 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.tanh(tmp8) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source 
Nodes: [input_tensor], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_source], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, primals_2, buf3, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_target], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z, sub, mul, mul_1, add, tanh], Original ATen: [aten.sigmoid, aten.rsub, aten.mul, aten.add, aten.tanh] triton_poi_fused_add_mul_rsub_sigmoid_tanh_2.run(buf1, buf4, buf2, buf5, 16, grid=grid(16), stream=stream0) return (buf5, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class BothContextGate(nn.Module): """Apply the context gate to both contexts""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(BothContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh((1.0 - z) * target + z * source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
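A minimal eager usage sketch for the module above (CPU; sizes follow get_init_inputs, all names from this record):

import torch

gate = BothContextGate(embeddings_size=4, decoder_size=4, attention_size=4, output_size=4)
prev_emb = torch.rand(4, 4)    # (batch, embeddings_size)
dec_state = torch.rand(4, 4)   # (batch, decoder_size)
attn_state = torch.rand(4, 4)  # (batch, attention_size)
out = gate(prev_emb, dec_state, attn_state)
print(out.shape)  # torch.Size([4, 4]); values lie in (-1, 1) because of the tanh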
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = tmp1 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.tanh(tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, 
buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(16)](buf1, buf4, buf2, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf5, primals_3, buf0, buf1, buf2, buf3, buf4, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class BothContextGateNew(nn.Module): """Apply the context gate to both contexts""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(BothContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_6 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
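A quick parity sketch, assuming a CUDA device and that the eager BothContextGate and the compiled wrapper above are both in scope; the wrapper keeps the same ContextGate submodule layout, so an eager state dict loads directly:

import torch

torch.manual_seed(0)
eager = BothContextGate(4, 4, 4, 4).cuda()
compiled = BothContextGateNew(4, 4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights in both paths

inputs = [torch.rand(4, 4, device='cuda') for _ in range(3)]
# Both paths should agree to floating-point tolerance.
print(torch.allclose(eager(*inputs), compiled(*inputs), atol=1e-6))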
ChenRocks/Distill-BERT-Textgen-ONMT
BothContextGate
false
17096
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
SourceContextGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_tensor => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/r6/cr6pdrpsjrizy2emadm3elrnad3xvoc6ejngp3sx6gbksgyalpru.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6j/c6jegmwlu3gpzd7ocps6mkqksk32tyz3b5p2rf3sercrtr3k2m3s.py # Topologically Sorted Source Nodes: [z, mul, add, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # mul => mul # tanh => tanh # z => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = 
call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %addmm_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_tensor, %mul), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] stream0 = get_raw_stream(0) 
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_source], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, primals_2, buf3, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), out=buf4) del primals_8 buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [z, mul, add, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.tanh] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf5, primals_9, buf1, buf2, 16, grid=grid(16), stream=stream0) del primals_9 return (buf5, primals_3, buf0, buf1, buf2, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class SourceContextGate(nn.Module): """Apply the context gate only to the source context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(SourceContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh(target + z * source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
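A sanity sketch (eager, CPU) that reproduces the forward pass from the raw gate components returned by ContextGate, confirming that only the source context is gated:

import torch

m = SourceContextGate(4, 4, 4, 4)
prev_emb, dec_state, attn_state = (torch.rand(4, 4) for _ in range(3))
z, source, target = m.context_gate(prev_emb, dec_state, attn_state)
expected = torch.tanh(target + z * source)  # gate applied to the source only
assert torch.allclose(m(prev_emb, dec_state, attn_state), expected)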
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8 ), 0), out=buf4) del primals_8 buf5 = buf4 del buf4 triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf5, primals_9, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 return buf5, primals_3, buf0, buf1, buf2, buf3, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class SourceContextGateNew(nn.Module): """Apply the context gate only to the source context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(SourceContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_6 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
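The fused kernel above folds the target bias add, the sigmoid gate, the gated source add, and the tanh into one elementwise pass. A direct-launch sketch, assuming the kernel and grid from this file are in scope and a CUDA device is available:

import torch

target_mm = torch.rand(4, 4, device='cuda')  # target projection before bias
bias = torch.rand(4, device='cuda')          # target_proj bias
gate_pre = torch.rand(4, 4, device='cuda')   # gate logits before sigmoid
source = torch.rand(4, 4, device='cuda')     # projected source context

expected = torch.tanh(target_mm + bias + torch.sigmoid(gate_pre) * source)
# The kernel writes its result in place into the first argument.
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](target_mm, bias,
    gate_pre, source, 16, XBLOCK=16, num_warps=1, num_stages=1)
print(torch.allclose(target_mm, expected))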
ChenRocks/Distill-BERT-Textgen-ONMT
SourceContextGate
false
17097
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
NoiseInjection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5i/c5itgadrrjqedncsqmt7gkyi2u4rtmc72mygkdrubora2tjkucz3.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %randn), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = 
tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.randn] buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Optional import torch.nn as nn class NoiseInjection(nn.Module): """ Model injects noisy bias to input tensor """ def __init__(self) ->None: """ Constructor method """ super(NoiseInjection, self).__init__() self.weight = nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True) def forward(self, input: 'torch.Tensor', noise: 'Optional[torch.Tensor]'=None) ->torch.Tensor: """ Forward pass :param input: (torch.Tensor) Input tensor :param noise: (Optional[torch.Tensor]) Noise tensor :return: (torch.Tensor) Output tensor """ if noise is None: noise = torch.randn(input.shape[0], 1, input.shape[2], input. shape[3], device=input.device, dtype=torch.float32) return input + self.weight * noise def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
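Two usage sketches for the module above: with the default zero weight it is an identity map, and passing an explicit noise tensor makes the op deterministic:

import torch

inj = NoiseInjection()
x = torch.rand(4, 4, 4, 4)
assert torch.equal(inj(x), x)  # weight starts at zero, so the input passes through

noise = torch.randn(4, 1, 4, 4)  # broadcast over the channel dimension
with torch.no_grad():
    inj.weight.fill_(0.1)
assert torch.allclose(inj(x, noise), x + 0.1 * noise)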
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class NoiseInjectionNew(nn.Module): """ Model injects noisy bias to input tensor """ def __init__(self) ->None: """ Constructor method """ super(NoiseInjectionNew, self).__init__() self.weight = nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
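The compiled wrapper always draws fresh noise via aten.randn inside call(), so a parity check against the eager module has to reseed the RNG before each call. A sketch, assuming a CUDA device and that both classes above are in scope:

import torch

eager = NoiseInjection().cuda()
compiled = NoiseInjectionNew().cuda()
with torch.no_grad():
    eager.weight.fill_(0.5)
    compiled.weight.fill_(0.5)
x = torch.rand(4, 4, 4, 4, device='cuda')

torch.manual_seed(0)
out_eager = eager(x)        # draws noise via torch.randn
torch.manual_seed(0)
out_compiled = compiled(x)  # draws the same noise via aten.randn in call()
print(torch.allclose(out_eager, out_compiled))  # should print True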
ChristophReich1996/Multi-StyleGAN
NoiseInjection
false
17098
[ "MIT" ]
7
988f2dfea85b3205126b40c61edfb28107eb3173
https://github.com/ChristophReich1996/Multi-StyleGAN/tree/988f2dfea85b3205126b40c61edfb28107eb3173
TargetContextGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_tensor => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/r6/cr6pdrpsjrizy2emadm3elrnad3xvoc6ejngp3sx6gbksgyalpru.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wx/cwxgaxitmqslced4igsbzz4fo6hjnraigfwkx4zs3jtiedrlhwbc.py # Topologically Sorted Source Nodes: [z, mul, add, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # mul => mul # tanh => tanh # z => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = 
call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %addmm_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %add_tensor), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_out_ptr0 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp6 = tmp4 + tmp5 tmp7 = tmp3 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] stream0 = get_raw_stream(0) 
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, primals_2, buf3, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_target], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [z, mul, add, tanh], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.tanh] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf5, buf1, buf4, primals_7, 16, grid=grid(16), stream=stream0) del primals_7 return (buf5, primals_3, buf0, buf1, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGate(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh(z * target + source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
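Before looking at the compiled version below, it can help to smoke-test the eager reference by hand. The following sketch is not part of the original dump; it assumes the TargetContextGate class and the get_inputs helper from the cell above are in scope, and it runs one forward pass on CPU.

import torch

# Minimal smoke test (illustrative only; assumes TargetContextGate and
# get_inputs from the cell above are in scope).
torch.manual_seed(0)
gate = TargetContextGate(embeddings_size=4, decoder_size=4,
                         attention_size=4, output_size=4)
prev_emb, dec_state, attn_state = get_inputs()
out = gate(prev_emb, dec_state, attn_state)
assert out.shape == (4, 4)
# The final tanh bounds every element to the open interval (-1, 1).
assert out.abs().max() < 1.0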
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_out_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp6 = tmp4 + tmp5 tmp7 = tmp3 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = buf2 del buf2 triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf5, buf1, buf4, primals_7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 return buf5, primals_3, buf0, buf1, buf3, buf4, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGateNew(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_6 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
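Because the wrapper above routes module parameters into call() by positional primals index (cat kernels consume primals_1..3 as the three inputs, while primals_6/primals_7 carry the source projection), a cheap way to validate the wiring is to compare it against the eager reference on shared weights. The harness below is a sketch, assuming both classes from the cells above are in scope and that a CUDA device is available, since call() pins every buffer to cuda:0.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref = TargetContextGate(4, 4, 4, 4).cuda()
    fused = TargetContextGateNew(4, 4, 4, 4).cuda()
    # Both classes register identical submodules, so the state dicts line up.
    fused.load_state_dict(ref.state_dict())
    inputs = [torch.rand(4, 4, device='cuda') for _ in range(3)]
    torch.testing.assert_close(fused(*inputs), ref(*inputs), rtol=1e-5, atol=1e-5)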
ChenRocks/Distill-BERT-Textgen-ONMT
TargetContextGate
false
17099
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
ContextGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_tensor => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/32/c32slg6hax4e5s5lih7gq4k2u4kiyq7iy3dwygl5hzxvxpvxxodr.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # z => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mt/cmtt4jgavxgyxw4hrt3tqlulzsn4ezp2jrwnzdwmgwcfb3ryqurt.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler 
import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [input_tensor], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [z], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf2, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_source], Original ATen: [aten.addmm] 
extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_1, primals_2, buf4, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [proj_target], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_8 del primals_9 return (buf2, buf3, buf5, primals_3, buf0, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
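As with the record above, a short eager-mode smoke test makes the three outputs concrete. This is an illustrative sketch, assuming ContextGate and get_inputs from the cell above are in scope.

import torch

torch.manual_seed(0)
gate = ContextGate(embeddings_size=4, decoder_size=4,
                   attention_size=4, output_size=4)
z, proj_source, proj_target = gate(*get_inputs())
assert z.shape == proj_source.shape == proj_target.shape == (4, 4)
# z comes out of a sigmoid, so it lies strictly inside (0, 1).
assert z.min() > 0.0 and z.max() < 1.0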
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_2[grid(32)](primals_1, primals_2, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_8 del primals_9 return buf2, buf3, buf5, primals_3, buf0, buf2, buf4 class ContextGateNew(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGateNew, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, input_0, input_1, input_2): primals_4 = self.gate.weight primals_5 = self.gate.bias primals_6 = self.source_proj.weight primals_7 = self.source_proj.bias primals_8 = self.target_proj.weight primals_9 = self.target_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2]
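The compiled wrapper returns the same triple (z, proj_source, proj_target) as the eager module, so equivalence can be checked on all three outputs at once. The sketch below assumes both classes from the cells above are in scope and requires a CUDA device, since call() allocates on cuda:0.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref = ContextGate(4, 4, 4, 4).cuda()
    fused = ContextGateNew(4, 4, 4, 4).cuda()
    fused.load_state_dict(ref.state_dict())  # identical parameter layout
    inputs = [torch.rand(4, 4, device='cuda') for _ in range(3)]
    # assert_close compares the two 3-tuples pairwise.
    torch.testing.assert_close(fused(*inputs), ref(*inputs), rtol=1e-5, atol=1e-5)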
ChenRocks/Distill-BERT-Textgen-ONMT
ContextGate
false
17100
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4u/c4u3liwgy6ah3xlxfhvbczvvfare3yvkd7osc2eyowkxndto3p24.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gn/cgn3tpasui6fv3xxba47jzqip7bgipyrz4akedry64e2fx5k4rvd.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3a/c3asrsyuybhvyyokqadgkn4dnhzrdoetfuhpfcg2pkqy2j4beaia.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/h3/ch3dkn75z5nv2s6poei22lobtkafusftzt2ks6goill4cq3nfbmj.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_1 
del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf6, primals_7, primals_3, 256, grid=grid(256), stream=stream0) del primals_7 return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, buf7, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FFN. dropout (float): dropout probability in :math:`[0, 1)`. """ def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.relu = nn.ReLU() self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. """ inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
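Since the module contains two Dropout layers, deterministic checks should run in eval mode. Below is a minimal usage sketch, assuming PositionwiseFeedForward and get_inputs from the cell above are in scope.

import torch

torch.manual_seed(0)
ffn = PositionwiseFeedForward(d_model=4, d_ff=4, dropout=0.1)
ffn.eval()  # disable dropout so repeated calls agree
x = get_inputs()[0]  # a (4, 4, 4, 4) tensor; LayerNorm normalizes the last dim
y = ffn(x)
assert y.shape == x.shape  # the residual connection preserves the shape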
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) 
assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4, primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (64, 4), (4, 1), 0 ), primals_6, buf7, primals_4 class PositionwiseFeedForwardNew(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FFN. dropout (float): dropout probability in :math:`[0, 1)`. """ def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.relu = nn.ReLU() self.dropout_2 = nn.Dropout(dropout) def forward(self, input_0): primals_4 = self.w_1.weight primals_5 = self.w_1.bias primals_6 = self.w_2.weight primals_7 = self.w_2.bias primals_1 = self.layer_norm.weight primals_2 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
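Note that the compiled graph above contains no dropout nodes, so it matches the eager module only when dropout is inactive. The equivalence sketch below therefore puts both modules in eval mode; it assumes both classes from the cells above are in scope and that a CUDA device is present.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    ref = PositionwiseFeedForward(4, 4).cuda().eval()
    fused = PositionwiseFeedForwardNew(4, 4).cuda().eval()
    fused.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(fused(x), ref(x), rtol=1e-5, atol=1e-5)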
ChenRocks/Distill-BERT-Textgen-ONMT
PositionwiseFeedForward
false
17101
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
WassersteinDiscriminatorLossCutMix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/gz/cgzqdldp3pmreasi22vy33kdwntl2c2mohdzvzqihmeb3a4xiaap.py # Topologically Sorted Source Nodes: [mul, mean, loss_real, neg_1, add, mul_1, loss_fake], Original ATen: [aten.mul, aten.mean, aten.neg, aten.add] # Source node to ATen node mapping: # add => add # loss_fake => mean_1 # loss_real => neg # mean => mean # mul => mul # mul_1 => mul_1 # neg_1 => neg_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %add), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_mean_mul_neg_0 = async_compile.triton('triton_per_fused_add_mean_mul_neg_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 
'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_neg_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = -tmp1 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp5 / tmp13 tmp15 = -tmp14 tmp16 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, mean, loss_real, neg_1, add, mul_1, loss_fake], Original ATen: [aten.mul, aten.mean, aten.neg, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_neg_0.run(buf2, buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Tuple
import torch.nn as nn


class WassersteinDiscriminatorLossCutMix(nn.Module):
    """
    This class implements the Wasserstein loss for a discriminator network
    when utilizing CutMix augmentation.
    """

    def __init__(self) -> None:
        """
        Constructor method
        """
        super(WassersteinDiscriminatorLossCutMix, self).__init__()

    def forward(self, prediction: torch.Tensor, label: torch.Tensor
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass. The two loss parts are returned separately so that the
        whole backward graph is not retained later.
        :param prediction: (torch.Tensor) Discriminator prediction
        :param label: (torch.Tensor) CutMix label map
        :return: (Tuple[torch.Tensor, torch.Tensor]) Loss values for real and fake part
        """
        loss_real = -torch.mean(prediction * label)
        loss_fake = torch.mean(prediction * (-label + 1.0))
        return loss_real, loss_fake


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
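A minimal usage sketch (not from the repository; the binary CutMix-style mask below is my own assumption) showing how the two loss parts are consumed:

import torch

# Assumes WassersteinDiscriminatorLossCutMix from above is in scope.
criterion = WassersteinDiscriminatorLossCutMix()
prediction = torch.randn(4, 4, 4, 4)             # per-pixel discriminator scores
label = (torch.rand(4, 4, 4, 4) > 0.5).float()   # hypothetical CutMix mask
loss_real, loss_fake = criterion(prediction, label)
# The parts are returned separately; the total discriminator objective is
# typically their sum.
total_loss = loss_real + loss_fake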
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_neg_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = -tmp1 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp5 / tmp13 tmp15 = -tmp14 tmp16 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_mean_mul_neg_0[grid(1)](buf2, buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, buf3 class WassersteinDiscriminatorLossCutMixNew(nn.Module): """ This class implements the Wasserstein loss for a discriminator network when utilizing cut mix augmentation. """ def __init__(self) ->None: """ Constructor method """ super(WassersteinDiscriminatorLossCutMixNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
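A quick parity sketch (my own, assuming a CUDA device and that both classes above are in scope): the Triton-backed wrapper should reproduce the eager losses up to float tolerance, since it fuses the same multiply and both mean reductions into a single kernel pass.

import torch

if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    label = torch.rand(4, 4, 4, 4, device='cuda')
    ref_real, ref_fake = WassersteinDiscriminatorLossCutMix()(pred, label)
    new_real, new_fake = WassersteinDiscriminatorLossCutMixNew()(pred, label)
    assert torch.allclose(ref_real, new_real, atol=1e-6)
    assert torch.allclose(ref_fake, new_fake, atol=1e-6)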
ChristophReich1996/Multi-StyleGAN
WassersteinDiscriminatorLossCutMix
false
17,102
[ "MIT" ]
7
988f2dfea85b3205126b40c61edfb28107eb3173
https://github.com/ChristophReich1996/Multi-StyleGAN/tree/988f2dfea85b3205126b40c61edfb28107eb3173
NonSaturatingLogisticDiscriminatorLossCutMix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/qs/cqsdfmnlh3x4j3kx2wpr736lm3ggwv3mq5kkczkwr47bioyp2i56.py # Topologically Sorted Source Nodes: [neg, softplus, mul, loss_real, softplus_1, neg_1, add, mul_1, loss_fake], Original ATen: [aten.neg, aten.softplus, aten.mul, aten.mean, aten.add] # Source node to ATen node mapping: # add => add # loss_fake => mean_1 # loss_real => mean # mul => mul # mul_1 => mul_1 # neg => neg # neg_1 => neg_1 # softplus => exp, gt, log1p, where # softplus_1 => exp_1, gt_1, log1p_1, where_1 # Graph fragment: # %neg : [num_users=3] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%neg, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %neg, %log1p), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %arg1_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %arg0_1, %log1p_1), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, %add), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_mean_mul_neg_softplus_0 = 
async_compile.triton('triton_per_fused_add_mean_mul_neg_softplus_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_softplus_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp1 = -tmp0 tmp2 = 20.0 tmp3 = tmp1 > tmp2 tmp4 = tl_math.exp(tmp1) tmp5 = libdevice.log1p(tmp4) tmp6 = tl.where(tmp3, tmp1, tmp5) tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tmp0 > tmp2 tmp13 = tl_math.exp(tmp0) tmp14 = libdevice.log1p(tmp13) tmp15 = tl.where(tmp12, tmp0, tmp14) tmp16 = -tmp7 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = tmp15 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = 256.0 tmp24 = tmp11 / tmp23 tmp25 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp25, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [neg, softplus, mul, loss_real, softplus_1, neg_1, add, mul_1, loss_fake], Original ATen: [aten.neg, aten.softplus, aten.mul, aten.mean, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_0.run(buf2, buf3, arg0_1, 
arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F


class NonSaturatingLogisticDiscriminatorLossCutMix(nn.Module):
    """
    Implementation of the non-saturating GAN loss for the discriminator
    network when performing CutMix augmentation.
    """

    def __init__(self) -> None:
        """
        Constructor
        """
        super(NonSaturatingLogisticDiscriminatorLossCutMix, self).__init__()

    def forward(self, prediction: torch.Tensor, label: torch.Tensor
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass. The two loss parts are returned separately so that the
        whole backward graph is not retained later.
        :param prediction: (torch.Tensor) Discriminator prediction
        :param label: (torch.Tensor) CutMix label map
        :return: (Tuple[torch.Tensor, torch.Tensor]) Loss values for real and fake part
        """
        loss_real = torch.mean(F.softplus(-prediction) * label)
        loss_fake = torch.mean(F.softplus(prediction) * (-label + 1.0))
        return loss_real, loss_fake


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
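A small numerical note (my own check): softplus(-x) equals -log(sigmoid(x)) and softplus(x) equals -log(1 - sigmoid(x)), so loss_real is the standard non-saturating -log D term gated by the CutMix mask, and loss_fake the matching -log(1 - D) term on the complementary region.

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, 7)
assert torch.allclose(F.softplus(-x), -torch.log(torch.sigmoid(x)), atol=1e-6)
assert torch.allclose(F.softplus(x), -torch.log(1.0 - torch.sigmoid(x)), atol=1e-5)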
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp2 = 20.0 tmp3 = tmp1 > tmp2 tmp4 = tl_math.exp(tmp1) tmp5 = libdevice.log1p(tmp4) tmp6 = tl.where(tmp3, tmp1, tmp5) tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tmp0 > tmp2 tmp13 = tl_math.exp(tmp0) tmp14 = libdevice.log1p(tmp13) tmp15 = tl.where(tmp12, tmp0, tmp14) tmp16 = -tmp7 tmp17 = 1.0 tmp18 = tmp16 + tmp17 tmp19 = tmp15 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = 256.0 tmp24 = tmp11 / tmp23 tmp25 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_0[grid(1)](buf2, buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, buf3 class NonSaturatingLogisticDiscriminatorLossCutMixNew(nn.Module): """ Implementation of the non saturating GAN loss for the discriminator network when performing cut mix augmentation. """ def __init__(self) ->None: """ Constructor """ super(NonSaturatingLogisticDiscriminatorLossCutMixNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
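Worth flagging (a fact about PyTorch, not stated in the dump): the kernel's "> 20" comparison mirrors F.softplus's default threshold=20, above which softplus falls back to the identity for numerical stability. A parity sketch under that assumption, scaled so both branches are exercised:

import torch

if torch.cuda.is_available():
    pred = torch.randn(4, 4, 4, 4, device='cuda') * 30  # exercises both branches
    label = torch.rand(4, 4, 4, 4, device='cuda')
    ref = NonSaturatingLogisticDiscriminatorLossCutMix()(pred, label)
    new = NonSaturatingLogisticDiscriminatorLossCutMixNew()(pred, label)
    assert all(torch.allclose(r, n, atol=1e-5) for r, n in zip(ref, new))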
ChristophReich1996/Multi-StyleGAN
NonSaturatingLogisticDiscriminatorLossCutMix
false
17,103
[ "MIT" ]
7
988f2dfea85b3205126b40c61edfb28107eb3173
https://github.com/ChristophReich1996/Multi-StyleGAN/tree/988f2dfea85b3205126b40c61edfb28107eb3173
MinibatchStdDev
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/io/cio34btwojspz5swlstwodb5rnk5sdzuyr5rn7jnubh5hbajuwv7.py # Topologically Sorted Source Nodes: [mean, output, pow_1, mean_1, clamp, output_1, mean_2, output_3], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.repeat] # Source node to ATen node mapping: # clamp => clamp_min # mean => mean # mean_1 => mean_1 # mean_2 => mean_2 # output => sub # output_1 => sqrt # output_3 => repeat # pow_1 => pow_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mean_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view, [4, 1, 4, 4]), kwargs = {}) triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0 = async_compile.triton('triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/uw/cuwbof5ypfifmq2ruxcadtdzir6c2cgmgwoo2zjn7d3qw5rccuaf.py # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # output_4 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': 
True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [mean, output, pow_1, mean_1, clamp, output_1, mean_2, output_3], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.repeat] stream0 = get_raw_stream(0) triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MinibatchStdDev(nn.Module):
    """
    Mini-batch standard deviation module. Computes the standard deviation of
    every feature vector of a pixel and concatenates the resulting map to the
    original tensor.
    """

    def __init__(self, alpha: float = 1e-08) -> None:
        """
        Constructor method
        :param alpha: (float) Small constant for numeric stability
        """
        super(MinibatchStdDev, self).__init__()
        self.alpha = alpha

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """
        Forward pass
        :param input: (torch.Tensor) Input tensor [batch size, channels, height, width]
        :return: (torch.Tensor) Output tensor [batch size, channels + 1, height, width]
        """
        output = input - torch.mean(input, dim=0, keepdim=True)
        output = torch.sqrt(torch.mean(output ** 2, dim=0, keepdim=False)
                            .clamp(min=self.alpha))
        output = torch.mean(output).view(1, 1, 1)
        output = output.repeat(input.shape[0], 1, input.shape[2],
                               input.shape[3])
        output = torch.cat((input, output), 1)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
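A shape sketch (my own example): the module appends a single feature map holding the batch-wide statistic, so the channel count grows from C to C + 1, and every location of the appended map carries the same scalar.

import torch

layer = MinibatchStdDev()
x = torch.rand(4, 4, 4, 4)
y = layer(x)
assert y.shape == (4, 5, 4, 4)
# Channel 4 is constant across batch and spatial positions.
assert torch.allclose(y[:, 4], y[0, 4, 0, 0].expand(4, 4, 4))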
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_clamp_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf3, class MinibatchStdDevNew(nn.Module): """ Mini-batch standard deviation module computes the standard deviation of every feature vector of a pixel and concatenates the resulting map to the original tensor """ def __init__(self, alpha: 'float'=1e-08) ->None: """ Constructor method :param alpha: (float) Small constant for numeric stability """ super(MinibatchStdDevNew, self).__init__() self.alpha = alpha def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
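One implementation detail worth noting: the compiled version never materializes a separate torch.cat. It allocates the (4, 5, 4, 4) output up front, the reduction kernel writes the std map straight into channel 4 through the aliased reinterpret_tensor view, and the second kernel copies the input into channels 0 to 3. A parity sketch (assumes CUDA):

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(MinibatchStdDev()(x), MinibatchStdDevNew()(x), atol=1e-6)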
ChristophReich1996/Multi-StyleGAN
MinibatchStdDev
false
17,104
[ "MIT" ]
7
988f2dfea85b3205126b40c61edfb28107eb3173
https://github.com/ChristophReich1996/Multi-StyleGAN/tree/988f2dfea85b3205126b40c61edfb28107eb3173
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/m4/cm4phghqj5w2sk63nw5o42zp35vrpy756culirf4hdhqb2konszt.py # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_2 => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/em/cemrzefmonv7kluqrhv6urbabflvt2tncfknrqz4dgzv35jr4rjs.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_2 => add # out_3 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xv/cxv72lw46eisa3ys22lozw4a4nobogioiypvuchke4culwgfgkoq.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # out_2 => add # out_3 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_10), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_11), kwargs = {}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, 
out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex % 16 x4 = (xindex // 4) x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x5), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dh/cdhmb2aiwrw5oo2zcx7c562mi2qxhrzhzggu6tpw7bh655aql4ah.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_5 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_13,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/l3/cl35un3kcsf7pvplhghgirnuph6p7ili57xhcg3be5ggxi7rvwhl.py # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.add] # Source node to ATen node mapping: # out_8 => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %add_2), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xn/cxni5o3bqn7ehlprfyyqqqfgj7d5cqyj63kyztvr77zovtxa26e2.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # out_9 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5j/c5j4yayex57kwcn3brpymomkx3jk3mwt3h5thk6a7gey24s5tlwc.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # out_9 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_16), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_17), kwargs = {}) triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(buf6, primals_1, buf7, buf8, 16, grid=grid(16), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(buf6, primals_1, buf7, buf8, primals_10, primals_11, buf9, 64, grid=grid(64), stream=stream0) del primals_11 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf11, primals_13, buf17, 64, grid=grid(64), stream=stream0) del primals_13 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf13, primals_15, buf9, 64, grid=grid(64), stream=stream0) del primals_15 buf14 = buf8; del buf8 # reuse buf15 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_5.run(buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(buf13, buf14, buf15, primals_16, primals_17, buf16, 64, 
grid=grid(64), stream=stream0) del buf14 del buf15 del primals_17 return (buf16, primals_1, primals_10, primals_16, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf13, primals_14, buf17, primals_12, primals_8, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: dim_K
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Encoder(nn.Module):

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
            dropout)

    def forward(self, x):
        out = self.attention(x)
        out = self.feed_forward(out)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'num_head': 4, 'hidden': 4, 'dropout': 0.5}]
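A minimal harness sketch, assuming a CUDA device and using the get_init_inputs()/get_inputs() pair above; note that the residual add in Multi_Head_Attention broadcasts the (4, 4) input against a (4, 1, 4) attention context, so the eager output is (4, 4, 4):

import torch

init_args, init_kwargs = get_init_inputs()
model = Encoder(*init_args, **init_kwargs).cuda().eval()  # eval() disables dropout=0.5
x, = get_inputs()
with torch.no_grad():
    out = model(x.cuda())
# (4, 1, 4) context + (4, 4) input broadcasts to (4, 4, 4) before LayerNorm.
print(out.shape)  # torch.Size([4, 4, 4])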
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x5, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) 
assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor( primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3) buf4 = buf3 del buf3 get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1, buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1, buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11, primals_13, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 triton_poi_fused_add_4[grid(64)](buf13, primals_15, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 buf14 = buf8 del buf8 buf15 = buf7 del buf7 triton_poi_fused_native_layer_norm_5[grid(16)](buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_6[grid(64)](buf13, buf14, buf15, primals_16, primals_17, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_17 return buf16, primals_1, primals_10, primals_16, buf4, reinterpret_tensor( buf5, (4, 4), (4, 1), 0), buf6, reinterpret_tensor(buf9, (16, 4), ( 4, 1), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf13, primals_14, buf17, primals_12, primals_8, reinterpret_tensor( buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1 ), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0) class 
Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: dim_K
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class EncoderNew(nn.Module):

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(EncoderNew, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
            dropout)

    def forward(self, input_0):
        primals_1 = self.attention.fc_Q.weight
        primals_3 = self.attention.fc_Q.bias
        primals_2 = self.attention.fc_K.weight
        primals_5 = self.attention.fc_K.bias
        primals_4 = self.attention.fc_V.weight
        primals_7 = self.attention.fc_V.bias
        primals_6 = self.attention.fc.weight
        primals_9 = self.attention.fc.bias
        primals_10 = self.attention.layer_norm.weight
        primals_11 = self.attention.layer_norm.bias
        primals_8 = self.feed_forward.fc1.weight
        primals_13 = self.feed_forward.fc1.bias
        primals_12 = self.feed_forward.fc2.weight
        primals_15 = self.feed_forward.fc2.bias
        primals_16 = self.feed_forward.layer_norm.weight
        primals_17 = self.feed_forward.layer_norm.bias
        primals_14 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0]
Ch4ndelier/Transformer_Zero_Velocity_classification
Encoder
false
17,105
[ "MIT" ]
6
857efb66189c503e983c11bd7dde16ad19c51ada
https://github.com/Ch4ndelier/Transformer_Zero_Velocity_classification/tree/857efb66189c503e983c11bd7dde16ad19c51ada
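A sketch of driving the compiled wrapper directly (assumptions: CUDA available; shapes are pinned to the benchmark sizes by assert_size_stride in call(), so only a (4, 4) input is accepted). call() returns buf16 first, and forward() surfaces only output[0]:

import torch

fused = EncoderNew(dim_model=4, num_head=4, hidden=4, dropout=0.5).cuda()
with torch.no_grad():
    y = fused(torch.rand(4, 4, device='cuda'))
print(y.shape)  # torch.Size([4, 4, 4]); buf16 from call()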
DiffLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/n5/cn5owfanpxjsewebvo7fjblplecjnwynmfqg4muuhibn6pxktoy2.py # Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # input1_1 => sub # input1_l2 => div # input1_mean => mean # norm => pow_1, sum_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-06 tmp18 = tmp16 + tmp17 tmp19 = tmp10 / tmp18 tl.store(out_ptr2 + (r1 + (64*x0)), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mv/cmvguochvvcpa5scnnhfs2licvagcl5gxn2nbpmiyvm75ldyiyed.py # Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean] # Source node to ATen node mapping: # diff_loss => mean_2 # pow_1 => pow_5 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mm, 2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {}) triton_red_fused_mean_pow_1 = async_compile.triton('triton_red_fused_mean_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [input1_mean, input1_1, norm, add, input1_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg0_1, buf4, 4, 64, grid=grid(4), stream=stream0) del arg0_1 buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [input2_mean, input2_1, norm_1, add_1, input2_l2], Original ATen: [aten.mean, aten.sub, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mean_sub_0.run(arg1_1, buf5, 4, 64, grid=grid(4), stream=stream0) del arg1_1 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, input2_l2, mm], Original ATen: [aten.add, aten.div, aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0), buf5, out=buf6) del buf4 del buf5 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [pow_1, diff_loss], Original ATen: [aten.pow, aten.mean] triton_red_fused_mean_pow_1.run(buf8, buf6, 1, 4096, grid=grid(1), stream=stream0) del buf6 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class DiffLoss(nn.Module):

    def __init__(self):
        super(DiffLoss, self).__init__()

    def forward(self, input1, input2):
        batch_size = input1.size(0)
        input1 = input1.view(batch_size, -1)
        input2 = input2.view(batch_size, -1)
        input1_mean = torch.mean(input1, dim=0, keepdims=True)
        input2_mean = torch.mean(input2, dim=0, keepdims=True)
        input1 = input1 - input1_mean
        input2 = input2 - input2_mean
        input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
        input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-06)
        input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
        input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-06)
        diff_loss = torch.mean(input1_l2.t().mm(input2_l2).pow(2))
        return diff_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
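Restated, the loss is the mean squared entry of the 64 x 64 Gram matrix between the mean-centered, row-normalized views, which is where the 1/4096 scale in triton_red_fused_mean_pow_1 comes from; a small sketch on the benchmark shapes:

import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
a2 = a.view(4, -1) - a.view(4, -1).mean(dim=0, keepdim=True)
a2 = a2 / (a2.norm(p=2, dim=1, keepdim=True) + 1e-06)
b2 = b.view(4, -1) - b.view(4, -1).mean(dim=0, keepdim=True)
b2 = b2 / (b2.norm(p=2, dim=1, keepdim=True) + 1e-06)
ref = a2.t().mm(b2).pow(2).mean()  # mean over 64 * 64 = 4096 entries
print(torch.allclose(DiffLoss()(a, b), ref))  # True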
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-06 tmp18 = tmp16 + tmp17 tmp19 = tmp10 / tmp18 tl.store(out_ptr2 + (r1 + 64 * x0), tmp19, xmask) @triton.jit def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg0_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg1_1, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0), buf5, out=buf6) del buf4 del buf5 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del buf7 triton_red_fused_mean_pow_1[grid(1)](buf8, buf6, 1, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf6 return buf8, class DiffLossNew(nn.Module): def __init__(self): super(DiffLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 
arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Columbine21/TFR-Net
DiffLoss
false
17,106
[ "MIT" ]
7
1da01577542e7f477fdf7323ec0696aebc632357
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
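A quick cross-check sketch (assumes CUDA) that the fused DiffLossNew agrees with the eager DiffLoss on the benchmark shapes; the persistent-reduction kernel fuses the centering, norm, and division into one pass per input:

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(DiffLoss()(a, b), DiffLossNew()(a, b), atol=1e-06))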
MSE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cw/ccw3nx2nse6tf4xwqzamaecuni44pkzdjbel4g7dfiheyqcgfp43.py # Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # diffs => add # mse => div # neg => neg # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {}) triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 0.00390625 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, diffs, pow_1, sum_1, mse], Original ATen: [aten.neg, aten.add, aten.pow, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MSE(nn.Module):

    def __init__(self):
        super(MSE, self).__init__()

    def forward(self, pred, real):
        diffs = torch.add(real, -pred)
        n = torch.numel(diffs.data)
        mse = torch.sum(diffs.pow(2)) / n
        return mse


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
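For the (4, 4, 4, 4) benchmark inputs, n = 256, so the loss reduces to a plain mean of squared differences; the fused kernel bakes this in as the multiplier 0.00390625 = 1/256. A sketch of the equivalence:

import torch

pred, real = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
# sum((real - pred)**2) / 256 is just the mean over all 256 elements.
print(torch.allclose(MSE()(pred, real), ((real - pred) ** 2).mean()))  # True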
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 0.00390625 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSENew(nn.Module): def __init__(self): super(MSENew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Columbine21/TFR-Net
MSE
false
17,107
[ "MIT" ]
7
1da01577542e7f477fdf7323ec0696aebc632357
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
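Equivalently, the compiled wrapper should match torch.nn.functional.mse_loss on the pinned shapes; a sketch assuming a CUDA device:

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
real = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(MSENew()(pred, real), F.mse_loss(pred, real)))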
Unet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lh/clh62q2fqkcwgfxgiraifucbw5zhy5t3ajiun5jsf5dy7wadk3y6.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mw/cmwjg2bno3dxx6o4yhurakjovmiwaa5s7nsptshsjyp3i2fqtce5.py # Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # iadd => add # x_2 => convolution_1 # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %primals_1), kwargs = {}) # %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 3, 0, 9223372036854775807), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_1, %slice_scatter_default, 1, 0, 1), kwargs = {}) # %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 1, 0, 1), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_2, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_2, 0.01), kwargs = {}) # %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %slice_scatter_default_2, %mul_1), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 32 x3 = xindex x0 = xindex % 4096 x2 = (xindex // 131072) tmp21 = tl.load(in_ptr0 + (x3), None) tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp3, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp2, eviction_policy='evict_last', other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + (x3), tmp27, None) tl.store(out_ptr1 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/s4/cs47te7mz3iwdgszkucmdcxpyckvs4b762plxxe2frt675ykabvv.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_4 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 32), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': 
False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/w7/cw7ufaa2y5epljtalhqmptzrbibmif7zcowz32calexsd4fykcog.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_5 => convolution_3 # x_6 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_3, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_ptr0 + 
(x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/zw/czwn3fetopwkj6n57ixzfznw3g2k3a64izizadkdpppnv3e6jny6.py # Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # iadd_1 => add_1 # x_7 => convolution_4 # x_8 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_39, %convolution_2), kwargs = {}) # %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %add_1, 3, 0, 9223372036854775807), kwargs = {}) # %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_4, %slice_scatter_default_3, 1, 0, 32), kwargs = {}) # %slice_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_49, 1, 0, 32), kwargs = {}) # %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_5, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_5, 0.01), kwargs = {}) # %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %slice_scatter_default_5, %mul_3), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, 
# kernel path: runs/run_shard_2/inductor_cache/zw/czwn3fetopwkj6n57ixzfznw3g2k3a64izizadkdpppnv3e6jny6.py
# Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_1 => add_1
# x_7 => convolution_4
# x_8 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_39, %convolution_2), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %add_1, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_4, %slice_scatter_default_3, 1, 0, 32), kwargs = {})
# %slice_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_49, 1, 0, 32), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_5, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_5, 0.01), kwargs = {})
# %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %slice_scatter_default_5, %mul_3), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 1024) % 64
    x3 = xindex
    x2 = (xindex // 65536)
    x4 = xindex % 65536
    tmp21 = tl.load(in_ptr0 + (x3), None)
    tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 32, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tmp2 & tmp2
    tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
    tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp4 + tmp5
    tmp7 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp3, other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp3, tmp8, tmp9)
    tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
    tmp13 = tmp11 + tmp12
    tmp14 = tl.where(tmp2, tmp10, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp2, tmp14, tmp15)
    tmp17 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp2, other=0.0)
    tmp18 = tmp13 + tmp17
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp23 = tmp21 + tmp22
    tmp24 = tl.where(tmp2, tmp20, tmp23)
    tmp25 = tl.where(tmp2, tmp16, tmp24)
    tmp26 = 0.0
    tmp27 = tmp25 > tmp26
    tmp28 = 0.01
    tmp29 = tmp25 * tmp28
    tmp30 = tl.where(tmp27, tmp25, tmp29)
    tl.store(out_ptr0 + (x3), tmp27, None)
    tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
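# [Editor's note] Hedged sketch of what the slice_scatter chain above amounts to in eager
# PyTorch. From the index math (xnumel = 262144 = 4*64*1024, residual stride 32768 = 32*1024),
# x is the biased output of %convolution_4 with 64 channels and skip is %convolution_2 with
# 32 channels: only the first 32 channels receive the residual before LeakyReLU. Names are
# illustrative, not part of the generated module.
def _ref_sliced_residual_leaky_relu(x, skip, negative_slope=0.01):
    y = x.clone()
    y[:, :32] = y[:, :32] + skip       # iadd_1 realized via slice_scatter
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)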
# kernel path: runs/run_shard_2/inductor_cache/xf/cxfdcedqg7j66kfjkniljvcff5oey5ph5yeybebu5m7ey5m7s5im.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_9 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_12, %primals_13, [2, 2], [0, 0], [1, 1], False, [0, 0], 64), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 64
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/2m/c2mcwvsxpnqylu52xqef25afcvl6hqcm7cwcwtkjf24lhj5gsrvv.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_10 => convolution_6
# x_11 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.01), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_6, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 128
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
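# [Editor's note] Index bookkeeping for triton_poi_fused_convolution_leaky_relu_6 above,
# reconstructed from its constants: xnumel = 131072 = N * C * (H*W) with C = 128 (the
# modulus applied to x1) and H*W = 256 (the divisor), hence N = 4; H = W = 16 is an
# assumption, since the kernel only ever sees the product H*W. A tiny illustrative helper:
def _channel_of(xindex, hw=256, channels=128):
    # mirrors x1 = (xindex // 256) % 128, which selects the bias element for each lane
    return (xindex // hw) % channels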
# kernel path: runs/run_shard_2/inductor_cache/3y/c3yivxkmjbiijwakylbhwlgluufmo32rygls3xm24vchzvq4zyin.py
# Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_2 => add_2
# x_12 => convolution_7
# x_13 => gt_5, mul_5, where_5
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_76, %convolution_5), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_2, %add_2, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_7, %slice_scatter_default_6, 1, 0, 64), kwargs = {})
# %slice_scatter_default_8 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_86, 1, 0, 64), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_8, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_8, 0.01), kwargs = {})
# %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %slice_scatter_default_8, %mul_5), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 256) % 128
    x3 = xindex
    x2 = (xindex // 32768)
    x4 = xindex % 32768
    tmp21 = tl.load(in_ptr0 + (x3), None)
    tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 64, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tmp2 & tmp2
    tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
    tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp4 + tmp5
    tmp7 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp3, other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp3, tmp8, tmp9)
    tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
    tmp13 = tmp11 + tmp12
    tmp14 = tl.where(tmp2, tmp10, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp2, tmp14, tmp15)
    tmp17 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp2, other=0.0)
    tmp18 = tmp13 + tmp17
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp23 = tmp21 + tmp22
    tmp24 = tl.where(tmp2, tmp20, tmp23)
    tmp25 = tl.where(tmp2, tmp16, tmp24)
    tmp26 = 0.0
    tmp27 = tmp25 > tmp26
    tmp28 = 0.01
    tmp29 = tmp25 * tmp28
    tmp30 = tl.where(tmp27, tmp25, tmp29)
    tl.store(out_ptr0 + (x3), tmp27, None)
    tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/jb/cjbruwuwwt462egmrevtjpa5mwgb7xkbdonhzv4jyrqupypjy6fi.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_14 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_18, %primals_19, [2, 2], [0, 0], [1, 1], False, [0, 0], 128), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 128
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
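# [Editor's note] The plain "convolution" kernels above (triton_poi_fused_convolution_5/_8)
# only add the bias in place after an extern conv kernel has produced the unbiased result;
# mutated_arg_names=['in_out_ptr0'] records that the same buffer is read and written.
# A hedged eager equivalent (names illustrative, not part of the generated module):
def _ref_inplace_bias(conv_out, bias):
    conv_out += bias.view(1, -1, 1, 1)   # in-place update, as with in_out_ptr0
    return conv_out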
# kernel path: runs/run_shard_2/inductor_cache/xr/cxroahdzdseogyax62dgilfdboz3ccfoe4ngbzmwm6dbmp4zaifv.py
# Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_15 => convolution_9
# x_16 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_9, 0.01), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_9, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 256
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
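# [Editor's note] In every pointwise kernel in this section, xmask is materialized as
# tl.full([XBLOCK], True, tl.int1) and the unconditional loads pass mask=None: the
# element counts here are powers of two, so inductor can prove each launch covers the
# tensor exactly and no bounds masking is needed. This reading is inferred from the
# generated code, not stated by it.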
# kernel path: runs/run_shard_2/inductor_cache/jn/cjnvtfjzpdnqug5kvjmimzeyuh5oi7panrv26xz5qcggbyxq2dbv.py
# Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_3 => add_3
# x_17 => convolution_10
# x_18 => gt_7, mul_7, where_7
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_113, %convolution_8), kwargs = {})
# %slice_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_3, %add_3, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_10, %slice_scatter_default_9, 1, 0, 128), kwargs = {})
# %slice_scatter_default_11 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %slice_123, 1, 0, 128), kwargs = {})
# %gt_7 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_11, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_11, 0.01), kwargs = {})
# %where_7 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %slice_scatter_default_11, %mul_7), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 64) % 256
    x3 = xindex
    x2 = (xindex // 16384)
    x4 = xindex % 16384
    tmp21 = tl.load(in_ptr0 + (x3), None)
    tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 128, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tmp2 & tmp2
    tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
    tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp4 + tmp5
    tmp7 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp3, other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp3, tmp8, tmp9)
    tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
    tmp13 = tmp11 + tmp12
    tmp14 = tl.where(tmp2, tmp10, tmp13)
    tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
    tmp16 = tl.where(tmp2, tmp14, tmp15)
    tmp17 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp2, other=0.0)
    tmp18 = tmp13 + tmp17
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp23 = tmp21 + tmp22
    tmp24 = tl.where(tmp2, tmp20, tmp23)
    tmp25 = tl.where(tmp2, tmp16, tmp24)
    tmp26 = 0.0
    tmp27 = tmp25 > tmp26
    tmp28 = 0.01
    tmp29 = tmp25 * tmp28
    tmp30 = tl.where(tmp27, tmp25, tmp29)
    tl.store(out_ptr0 + (x3), tmp27, None)
    tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/c6/cc6bovsihm7mregxtaem5flw2mwn7fw4wuhtzp6b77b4bfwg2p7o.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_19 => convolution_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_24, %primals_25, [2, 2], [0, 0], [1, 1], False, [0, 0], 256), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
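# [Editor's note] Hedged eager sketch of triton_poi_fused_add_convolution_leaky_relu_10
# above: the predicated loads (tl.load(..., tmp2, other=0.0)) only read where the channel
# index is below 128, which in tensor terms is a residual add into the first 128 of 256
# channels. The shapes (4, 256, 8, 8) and (4, 128, 8, 8) are inferred from the index math.
def _ref_add_conv_leaky_relu_10(conv10_biased, conv8, negative_slope=0.01):
    y = conv10_biased.clone()
    y[:, :128] = y[:, :128] + conv8    # iadd_3 realized via slice_scatter
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)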
# kernel path: runs/run_shard_2/inductor_cache/wu/cwudpullgf2gfzwx27mmngw3vrl5ansb4ot33rn5hcsjnawsrw73.py
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_20 => convolution_12
# x_21 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_12, 0.01), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_12, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_12 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 256
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/ue/cueedoig4436pgo4wusyjysy47uvjv65evcu3vogfmbns2zzt2hq.py
# Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_4 => add_4
# x_22 => convolution_13
# x_23 => gt_9, mul_9, where_9
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_8, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_13, %convolution_11), kwargs = {})
# %gt_9 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_4, 0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 0.01), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_4, %mul_9), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_13 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 256
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x3), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp7 = 0.01
    tmp8 = tmp4 * tmp7
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
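# [Editor's note] triton_poi_fused_add_convolution_leaky_relu_13 above is the full-tensor
# residual case (no channel slicing): out = leaky_relu(conv(x) + bias + residual). The
# boolean tensor written to out_ptr0 records the branch taken, which inductor keeps so the
# backward pass can replay the LeakyReLU choice. Hedged eager form (helper never called):
def _ref_residual_leaky_relu(conv_out, bias, residual, negative_slope=0.01):
    y = conv_out + bias.view(1, -1, 1, 1) + residual
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)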
# kernel path: runs/run_shard_2/inductor_cache/hp/chpusbntx57eqvxjqio3qressni2rmhwhw452bq5vpopq2ieqpyh.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_25 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 64) % 512
    x0 = xindex % 64
    x2 = (xindex // 32768)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 512, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr2 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp10, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
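# [Editor's note] Hedged sketch of the cat kernel above: the bias add for %convolution_14
# (produced earlier in this file) is folded into the same pass that concatenates it with
# the saved activation %where_7 along the channel dimension, avoiding a separate
# elementwise launch. Helper names are illustrative only.
def _ref_cat_14(conv14_out, bias, skip):
    return torch.cat([conv14_out + bias.view(1, -1, 1, 1), skip], dim=1)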
# kernel path: runs/run_shard_2/inductor_cache/ei/ceiqvokmmjwaetrc5477hzp73xoalmipqopitwba33mxrdgyxmjl.py
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_26 => convolution_15
# x_27 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.01), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_15, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_15 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 128
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/cj/ccjzubqbx5pzslgrkfyzn76xsjsy3z4ds2w63pcr6jywqnmotmvr.py
# Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_5 => add_5
# x_28 => convolution_16
# x_29 => gt_11, mul_11, where_11
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_16, %slice_181), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.01), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %add_5, %mul_11), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 128
    x2 = (xindex // 8192)
    x4 = xindex % 8192
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x4 + (32768*x2)), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp7 = 0.01
    tmp8 = tmp4 * tmp7
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
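# [Editor's note] In triton_poi_fused_add_convolution_leaky_relu_16 above, the residual
# term is read at (x4 + 32768*x2): per batch item it takes the first 8192 of 32768
# elements of in_ptr2, i.e. channels 0:128 of the 512-channel cat tensor (%slice_181).
# Hedged eager form, with shapes inferred from the index math:
def _ref_add_conv_leaky_relu_16(conv16_biased, cat_out, negative_slope=0.01):
    y = conv16_biased + cat_out[:, :128]
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)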
# kernel path: runs/run_shard_2/inductor_cache/a4/ca43w7rdeuahzd66dkdasxlibxuytmebuujqgfo3z5zgsf2t26gz.py
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_31 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_17, %where_5], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 256) % 256
    x0 = xindex % 256
    x2 = (xindex // 65536)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 256, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr2 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp10, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/5i/c5iip5wckp553dhx2qnwmws6nidisiynmxxlrw27aodjzhvhkz2j.py
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_32 => convolution_18
# x_33 => gt_12, mul_12, where_12
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_18, 0.01), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %convolution_18, %mul_12), kwargs = {})
triton_poi_fused_convolution_leaky_relu_18 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 64
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/nh/cnhicgihd23vl5cwk3dc4shjikphhvljjutk2gjgiqmehdxy57iq.py
# Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_6 => add_6
# x_34 => convolution_19
# x_35 => gt_13, mul_13, where_13
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_12, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_19, %slice_210), kwargs = {})
# %gt_13 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.01), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %add_6, %mul_13), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_19 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 64
    x2 = (xindex // 16384)
    x4 = xindex % 16384
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x4 + (65536*x2)), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp7 = 0.01
    tmp8 = tmp4 * tmp7
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/lv/clv3l7v76yjchpniaxgtlruitp5msegv27blanwfhzxvgbvpp3x7.py
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_37 => cat_2
# Graph fragment:
# %cat_2 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_20, %where_3], 1), kwargs = {})
triton_poi_fused_cat_20 = async_compile.triton('triton_poi_fused_cat_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 1024) % 128
    x0 = xindex % 1024
    x2 = (xindex // 131072)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 128, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp10, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
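# [Editor's note] Throughout this file the per-channel bias loads use
# eviction_policy='evict_last': every spatial element of a channel rereads the same bias
# value, so the hint asks the cache to keep those few values resident while the large
# activation tensors stream through. This rationale is inferred from the access pattern,
# not stated by the generated code.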
# kernel path: runs/run_shard_2/inductor_cache/zr/czrivbowjfbbnhl4zseibi76qjsoya6h5d5wm2lde63cu3fg45mc.py
# Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_38 => convolution_21
# x_39 => gt_14, mul_14, where_14
# Graph fragment:
# %convolution_21 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_44, %primals_45, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_21, 0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_21, 0.01), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %convolution_21, %mul_14), kwargs = {})
triton_poi_fused_convolution_leaky_relu_21 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 32
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/rl/crltropoedeanb7xqqvzrj3bmbyp3wrol5dnoazgej6b3zttaice.py
# Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_7 => add_7
# x_40 => convolution_22
# x_41 => gt_15, mul_15, where_15
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_14, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_22, %slice_239), kwargs = {})
# %gt_15 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_7, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.01), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %add_7, %mul_15), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 32
    x2 = (xindex // 32768)
    x4 = xindex % 32768
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x4 + (131072*x2)), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = 0.0
    tmp6 = tmp4 > tmp5
    tmp7 = 0.01
    tmp8 = tmp4 * tmp7
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/p7/cp7xwhawscoftt257di4ch36xdf3a5un3tvuzfama6i27tuznb2v.py
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_43 => cat_3
# Graph fragment:
# %cat_3 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_23, %where_1], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 4096) % 64
    x0 = xindex % 4096
    x2 = (xindex // 262144)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 64, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp10, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/ls/clsv6arnkcbhsxcnzsj56bt5hdkybqal2q66sbmjripcapmztmm5.py
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_44 => convolution_24
# x_45 => gt_16, mul_16, where_16
# Graph fragment:
# %convolution_24 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_50, %primals_51, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_24, 0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_24, 0.01), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_16, %convolution_24, %mul_16), kwargs = {})
triton_poi_fused_convolution_leaky_relu_24 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = 0.0
    tmp5 = tmp3 > tmp4
    tmp6 = 0.01
    tmp7 = tmp3 * tmp6
    tmp8 = tl.where(tmp5, tmp3, tmp7)
    tl.store(out_ptr0 + (x0), tmp5, None)
    tl.store(out_ptr1 + (x0), tmp8, None)
''', device_str='cuda')
tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.01 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tl.store(out_ptr0 + (x0), tmp5, None) tl.store(out_ptr1 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/sp/cspriff7yafugdugrnvsrc3car2umzek65gaipvfo3qj3fvqiczn.py # Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # iadd_8 => add_8 # x_46 => convolution_25 # x_47 => gt_17, mul_17, where_17 # Graph fragment: # %convolution_25 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_16, %primals_52, %primals_53, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_25, %slice_268), kwargs = {}) # %gt_17 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_8, 0), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.01), kwargs = {}) # %where_17 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %add_8, %mul_17), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_25 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_25', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 x1 = (xindex // 4096) tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x0 + (262144*x1)), None) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.01 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp7, None) tl.store(out_ptr1 + (x2), 
tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (32, ), (1, )) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128, ), (1, )) assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_19, (128, ), (1, )) assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (256, ), (1, )) assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_23, (256, ), (1, )) assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_25, (256, ), (1, )) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (256, ), (1, )) assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_31, (256, ), (1, )) assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (128, ), (1, )) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128, ), (1, )) assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_37, (128, ), (1, )) assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (64, ), (1, )) assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_41, (64, ), (1, )) assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_43, (64, ), (1, )) assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_45, (32, ), (1, )) assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (32, ), (1, )) assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_49, (32, ), (1, )) assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_51, (1, ), (1, )) 
assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_53, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 524288, grid=grid(524288), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_1.run(buf3, primals_5, primals_1, buf4, buf5, 524288, grid=grid(524288), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf7, primals_7, 131072, grid=grid(131072), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf8, primals_9, buf9, buf10, 262144, grid=grid(262144), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf13 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_4.run(buf11, primals_11, buf7, buf12, buf13, 262144, grid=grid(262144), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf14 = 
extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf15, primals_13, 65536, grid=grid(65536), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1)) buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf16, primals_15, buf17, buf18, 131072, grid=grid(131072), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf21 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_7.run(buf19, primals_17, buf15, buf20, buf21, 131072, grid=grid(131072), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] triton_poi_fused_convolution_8.run(buf23, primals_19, 32768, grid=grid(32768), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_9.run(buf24, primals_21, buf25, buf26, 65536, grid=grid(65536), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) buf29 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: 
[aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_10.run(buf27, primals_23, buf23, buf28, buf29, 65536, grid=grid(65536), stream=stream0) del buf27 del primals_23 # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] triton_poi_fused_convolution_11.run(buf31, primals_25, 16384, grid=grid(16384), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1)) buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool) buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_12.run(buf32, primals_27, buf33, buf34, 16384, grid=grid(16384), stream=stream0) del primals_27 # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1)) buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool) buf37 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_13.run(buf35, primals_29, buf31, buf36, buf37, 16384, grid=grid(16384), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1)) buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat] triton_poi_fused_cat_14.run(buf38, primals_31, buf29, buf39, 131072, grid=grid(131072), stream=stream0) del primals_31 # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1)) buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool) buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_15.run(buf40, primals_33, buf41, buf42, 32768, grid=grid(32768), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1)) buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool) buf45 = buf40; del buf40 # reuse # Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_16.run(buf43, primals_35, buf39, buf44, buf45, 32768, grid=grid(32768), stream=stream0) del buf43 del primals_35 # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1)) buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat] triton_poi_fused_cat_17.run(buf46, primals_37, buf21, buf47, 262144, grid=grid(262144), stream=stream0) del primals_37 # Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1)) buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16, 1), 0); del buf38 # reuse # Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_18.run(buf48, primals_39, buf49, buf50, 65536, grid=grid(65536), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution] buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1)) buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf53 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_19.run(buf51, primals_41, buf47, buf52, buf53, 65536, grid=grid(65536), stream=stream0) del buf51 del primals_41 # Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat] triton_poi_fused_cat_20.run(buf54, primals_43, buf13, buf55, 524288, grid=grid(524288), stream=stream0) del buf54 del primals_43 # Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.convolution] buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), 
(32768, 1024, 32, 1), 0); del buf46 # reuse # Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_21.run(buf56, primals_45, buf57, buf58, 131072, grid=grid(131072), stream=stream0) del primals_45 # Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution] buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf61 = buf56; del buf56 # reuse # Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_22.run(buf59, primals_47, buf55, buf60, buf61, 131072, grid=grid(131072), stream=stream0) del buf59 del primals_47 # Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution] buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat] triton_poi_fused_cat_23.run(buf62, primals_49, buf5, buf63, 1048576, grid=grid(1048576), stream=stream0) del buf62 del primals_49 # Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf35 # reuse # Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_24.run(buf64, primals_51, buf65, buf66, 16384, grid=grid(16384), stream=stream0) del primals_51 # Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution] buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) buf69 = buf64; del buf64 # reuse # Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu] triton_poi_fused_add_convolution_leaky_relu_25.run(buf67, primals_53, buf63, buf68, buf69, 16384, grid=grid(16384), stream=stream0) del buf67 del primals_53 return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10, buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, 
buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((128, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((64, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((32, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
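# -----------------------------------------------------------------------------
# Illustrative sketch (ours, not part of the Inductor output): the pointwise
# kernels above, e.g. triton_poi_fused_convolution_leaky_relu_21 and _24, all
# follow one pattern -- add the per-channel conv bias, apply LeakyReLU with
# slope 0.01, and store both the boolean mask (saved for backward) and the
# activated value. The minimal standalone kernel below reproduces that pattern
# on a contiguous NCHW tensor; the names `leaky_relu_bias_demo` and `_demo`
# are ours, and the snippet assumes a CUDA device is available.
import torch
import triton
import triton.language as tl


@triton.jit
def leaky_relu_bias_demo(x_ptr, bias_ptr, mask_ptr, out_ptr, n_elements,
                         channel_stride, n_channels, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    valid = offs < n_elements
    # Recover the channel index the same way the generated kernels do:
    # xindex // (H*W) % C, with H*W passed in as channel_stride.
    ch = offs // channel_stride % n_channels
    x = tl.load(x_ptr + offs, mask=valid)
    b = tl.load(bias_ptr + ch, mask=valid, eviction_policy='evict_last')
    y = x + b
    pos = y > 0.0
    out = tl.where(pos, y, y * 0.01)            # LeakyReLU, negative_slope=0.01
    tl.store(mask_ptr + offs, pos, mask=valid)  # boolean mask, as in out_ptr0
    tl.store(out_ptr + offs, out, mask=valid)   # activation, as in out_ptr1


def _demo():
    # Shapes chosen to mirror kernel 21: (4, 32, 32, 32), channel stride 1024.
    x = torch.randn(4, 32, 32, 32, device='cuda')
    bias = torch.randn(32, device='cuda')
    pos = torch.empty_like(x, dtype=torch.bool)
    out = torch.empty_like(x)
    n = x.numel()
    leaky_relu_bias_demo[(triton.cdiv(n, 1024),)](
        x, bias, pos, out, n, 32 * 32, 32, BLOCK=1024)
    ref = torch.nn.functional.leaky_relu(x + bias.view(1, -1, 1, 1), 0.01)
    assert torch.allclose(out, ref)
# -----------------------------------------------------------------------------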
import torch import torch.nn as nn class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, dropout=False, norm= 'batch', residual=True, activation='leakyrelu', transpose=False): super(ConvBlock, self).__init__() self.dropout = dropout self.residual = residual self.activation = activation self.transpose = transpose if self.dropout: self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.05) self.norm1 = None self.norm2 = None if norm == 'batch': self.norm1 = nn.BatchNorm2d(out_channels) self.norm2 = nn.BatchNorm2d(out_channels) elif norm == 'instance': self.norm1 = nn.InstanceNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) elif norm == 'mixed': self.norm1 = nn.BatchNorm2d(out_channels, affine=True) self.norm2 = nn.InstanceNorm2d(out_channels, affine=True) if self.transpose: self.conv1 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, padding=1) else: self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size= 3, padding=1) if self.activation == 'relu': self.actfun1 = nn.ReLU() self.actfun2 = nn.ReLU() elif self.activation == 'leakyrelu': self.actfun1 = nn.LeakyReLU() self.actfun2 = nn.LeakyReLU() elif self.activation == 'elu': self.actfun1 = nn.ELU() self.actfun2 = nn.ELU() elif self.activation == 'selu': self.actfun1 = nn.SELU() self.actfun2 = nn.SELU() def forward(self, x): ox = x x = self.conv1(x) if self.dropout: x = self.dropout1(x) if self.norm1: x = self.norm1(x) x = self.actfun1(x) x = self.conv2(x) if self.dropout: x = self.dropout2(x) if self.norm2: x = self.norm2(x) if self.residual: x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox. 
shape[1], x.shape[1]), :, :] x = self.actfun2(x) return x class Unet(nn.Module): def __init__(self, n_channel_in=1, n_channel_out=1, residual=False, down='conv', up='tconv', activation='selu'): super(Unet, self).__init__() self.residual = residual if down == 'maxpool': self.down1 = nn.MaxPool2d(kernel_size=2) self.down2 = nn.MaxPool2d(kernel_size=2) self.down3 = nn.MaxPool2d(kernel_size=2) self.down4 = nn.MaxPool2d(kernel_size=2) elif down == 'avgpool': self.down1 = nn.AvgPool2d(kernel_size=2) self.down2 = nn.AvgPool2d(kernel_size=2) self.down3 = nn.AvgPool2d(kernel_size=2) self.down4 = nn.AvgPool2d(kernel_size=2) elif down == 'conv': self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32) self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64) self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2, groups=128) self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2, groups=256) self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25 self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25 self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25 self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25 self.down1.bias.data = 0.01 * self.down1.bias.data + 0 self.down2.bias.data = 0.01 * self.down2.bias.data + 0 self.down3.bias.data = 0.01 * self.down3.bias.data + 0 self.down4.bias.data = 0.01 * self.down4.bias.data + 0 if up == 'bilinear' or up == 'nearest': self.up1 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up2 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up3 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) self.up4 = lambda x: nn.functional.interpolate(x, mode=up, scale_factor=2) elif up == 'tconv': self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, groups=256) self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, groups=128) self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2, groups=64) self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2, groups=32) self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25 self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25 self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25 self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25 self.up1.bias.data = 0.01 * self.up1.bias.data + 0 self.up2.bias.data = 0.01 * self.up2.bias.data + 0 self.up3.bias.data = 0.01 * self.up3.bias.data + 0 self.up4.bias.data = 0.01 * self.up4.bias.data + 0 self.conv1 = ConvBlock(n_channel_in, 32, residual, activation) self.conv2 = ConvBlock(32, 64, residual, activation) self.conv3 = ConvBlock(64, 128, residual, activation) self.conv4 = ConvBlock(128, 256, residual, activation) self.conv5 = ConvBlock(256, 256, residual, activation) self.conv6 = ConvBlock(2 * 256, 128, residual, activation) self.conv7 = ConvBlock(2 * 128, 64, residual, activation) self.conv8 = ConvBlock(2 * 64, 32, residual, activation) self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation) if self.residual: self.convres = ConvBlock(n_channel_in, n_channel_out, residual, activation) def forward(self, x): c0 = x c1 = self.conv1(x) x = self.down1(c1) c2 = self.conv2(x) x = self.down2(c2) c3 = self.conv3(x) x = self.down3(c3) c4 = self.conv4(x) x = self.down4(c4) x = self.conv5(x) x = self.up1(x) x = torch.cat([x, c4], 1) x = self.conv6(x) x = self.up2(x) x = torch.cat([x, c3], 1) x = self.conv7(x) x = self.up3(x) x = torch.cat([x, c2], 1) x = self.conv8(x) x = self.up4(x) x = torch.cat([x, c1], 1) x = self.conv9(x) if 
self.residual: x = torch.add(x, self.convres(c0)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
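# -----------------------------------------------------------------------------
# Usage note (ours, not part of the exported module): Unet passes its third and
# fourth ConvBlock arguments positionally, so `residual` lands on ConvBlock's
# `dropout` parameter and `activation` on its `norm` parameter. With Unet's
# defaults (residual=False, activation='selu'), every block therefore runs with
# dropout off and no normalization ('selu' matches no norm branch), while
# ConvBlock's own defaults residual=True and activation='leakyrelu' take
# effect -- which is exactly the fused add + leaky_relu(negative_slope=0.01)
# pattern visible in the compiled kernels above. A minimal eager-mode sanity
# check under those assumptions (helper name `_eager_demo` is ours; assumes a
# CUDA device):
def _eager_demo():
    model = Unet().cuda().eval()
    x, = get_inputs()
    with torch.no_grad():
        y = model(x.cuda())
    assert y.shape == (4, 1, 64, 64)
# -----------------------------------------------------------------------------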
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 32 x3 = xindex x0 = xindex % 4096 x2 = xindex // 131072 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp3, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp2, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) 
tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 64 x3 = xindex x2 = xindex // 65536 x4 = xindex % 65536 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 32, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 128 x3 = xindex x2 = xindex // 32768 x4 = xindex % 32768 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 64, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = 
tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 256 x3 = xindex x2 = xindex // 16384 x4 = xindex % 16384 tmp21 = tl.load(in_ptr0 + x3, None) tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 128, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tmp2 & tmp2 tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0) tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 + tmp5 tmp7 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp3, other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0) tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0 ) tmp13 = tmp11 + tmp12 tmp14 = tl.where(tmp2, tmp10, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp2, tmp14, tmp15) tmp17 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp2, other=0.0) tmp18 = tmp13 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp2, tmp18, tmp19) tmp23 = tmp21 + tmp22 tmp24 = tl.where(tmp2, tmp20, tmp23) tmp25 = tl.where(tmp2, tmp16, tmp24) tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr0 + x3, tmp27, None) tl.store(out_ptr1 + x3, tmp30, None) @triton.jit def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: 
tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 512 x0 = xindex % 64 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 x2 = xindex // 8192 x4 = xindex % 8192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 32768 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 x2 = xindex // 16384 x4 = xindex % 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 65536 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 128 x0 = xindex % 1024 x2 = xindex // 131072 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 x2 = xindex // 32768 x4 = xindex % 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4 + 
131072 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.01 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.01 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tl.store(out_ptr0 + x0, tmp5, None) tl.store(out_ptr1 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 x1 = xindex // 4096 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (x0 + 262144 * x1), None) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.01 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp7, None) tl.store(out_ptr1 + x2, tmp10, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 ) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (32,), (1,)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, 
(64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_43, (64,), (1,)) assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (32,), (1,)) assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_49, (32,), (1,)) assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_51, (1,), (1,)) assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_53, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0, primals_3, buf1, buf2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_add_convolution_leaky_relu_1[grid(524288)](buf3, primals_5, primals_1, buf4, buf5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 
1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_2[grid(131072)](buf7, primals_7, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_3[grid(262144)](buf8, primals_9, buf9, buf10, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf13 = buf8 del buf8 triton_poi_fused_add_convolution_leaky_relu_4[grid(262144)](buf11, primals_11, buf7, buf12, buf13, 262144, XBLOCK=1024, num_warps= 4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_5[grid(65536)](buf15, primals_13, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1)) buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_6[grid(131072)](buf16, primals_15, buf17, buf18, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf21 = buf16 del buf16 triton_poi_fused_add_convolution_leaky_relu_7[grid(131072)](buf19, primals_17, buf15, buf20, buf21, 131072, XBLOCK=1024, num_warps =4, num_stages=1) del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_8[grid(32768)](buf23, primals_19, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_leaky_relu_9[grid(65536)](buf24, primals_21, buf25, buf26, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_21 buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), 
padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf29 = buf24 del buf24 triton_poi_fused_add_convolution_leaky_relu_10[grid(65536)](buf27, primals_23, buf23, buf28, buf29, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf27 del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_11[grid(16384)](buf31, primals_25, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1)) buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_12[grid(16384)](buf32, primals_27, buf33, buf34, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1)) buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) buf37 = buf32 del buf32 triton_poi_fused_add_convolution_leaky_relu_13[grid(16384)](buf35, primals_29, buf31, buf36, buf37, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=256, bias=None) assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1)) buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0) del buf19 triton_poi_fused_cat_14[grid(131072)](buf38, primals_31, buf29, buf39, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1)) buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_convolution_leaky_relu_15[grid(32768)](buf40, primals_33, buf41, buf42, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_33 buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1)) buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) buf45 = buf40 del buf40 triton_poi_fused_add_convolution_leaky_relu_16[grid(32768)](buf43, primals_35, buf39, buf44, buf45, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf43 del primals_35 buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=128, bias=None) assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1)) buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16, 1), 0) del buf11 triton_poi_fused_cat_17[grid(262144)](buf46, primals_37, buf21, buf47, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_37 buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1)) buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16, 1), 0) del buf38 triton_poi_fused_convolution_leaky_relu_18[grid(65536)](buf48, primals_39, buf49, buf50, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_39 buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1)) buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) buf53 = buf48 del buf48 triton_poi_fused_add_convolution_leaky_relu_19[grid(65536)](buf51, primals_41, buf47, buf52, buf53, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf51 del primals_41 buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0) del buf3 triton_poi_fused_cat_20[grid(524288)](buf54, primals_43, buf13, buf55, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf54 del primals_43 buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1), 0) del buf46 triton_poi_fused_convolution_leaky_relu_21[grid(131072)](buf56, primals_45, buf57, buf58, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_45 buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool) buf61 = buf56 del buf56 triton_poi_fused_add_convolution_leaky_relu_22[grid(131072)](buf59, 
            primals_47, buf55, buf60, buf61, 131072, XBLOCK=1024,
            num_warps=4, num_stages=1)
        del buf59
        del primals_47
        buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=32, bias=None)
        assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_cat_23[grid(1048576)](buf62, primals_49, buf5,
            buf63, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf62
        del primals_49
        buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.bool)
        buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64,
            1), 0)
        del buf35
        triton_poi_fused_convolution_leaky_relu_24[grid(16384)](buf64,
            primals_51, buf65, buf66, 16384, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_51
        buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.bool)
        buf69 = buf64
        del buf64
        triton_poi_fused_add_convolution_leaky_relu_25[grid(16384)](buf67,
            primals_53, buf63, buf68, buf69, 16384, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf67
        del primals_53
    return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, primals_34, primals_36, primals_38,
        primals_40, primals_42, primals_44, primals_46, primals_48,
        primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10,
        buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25,
        buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39,
        buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53,
        buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68)


class ConvBlock(nn.Module):

    def __init__(self, in_channels, out_channels, dropout=False, norm=
        'batch', residual=True, activation='leakyrelu', transpose=False):
        super(ConvBlock, self).__init__()
        self.dropout = dropout
        self.residual = residual
        self.activation = activation
        self.transpose = transpose
        if self.dropout:
            self.dropout1 = nn.Dropout2d(p=0.05)
            self.dropout2 = nn.Dropout2d(p=0.05)
        self.norm1 = None
        self.norm2 = None
        if norm == 'batch':
            self.norm1 = nn.BatchNorm2d(out_channels)
            self.norm2 = nn.BatchNorm2d(out_channels)
        elif norm == 'instance':
            self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
            self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == 'mixed':
            self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
            self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
        if self.transpose:
            self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
                kernel_size=3, padding=1)
            self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
                kernel_size=3, padding=1)
        else:
            self.conv1 = nn.Conv2d(in_channels, out_channels,
                kernel_size=3, padding=1)
            self.conv2 = nn.Conv2d(out_channels, out_channels,
                kernel_size=3, padding=1)
        if self.activation == 'relu':
            self.actfun1 = nn.ReLU()
            self.actfun2 = nn.ReLU()
        elif self.activation == 'leakyrelu':
            self.actfun1 = nn.LeakyReLU()
            self.actfun2 = nn.LeakyReLU()
        elif self.activation == 'elu':
            self.actfun1 = nn.ELU()
            self.actfun2 = nn.ELU()
        elif self.activation == 'selu':
            self.actfun1 = nn.SELU()
            self.actfun2 = nn.SELU()

    def forward(self, x):
        ox = x
        x = self.conv1(x)
        if self.dropout:
            x = self.dropout1(x)
        if self.norm1:
            x = self.norm1(x)
        x = self.actfun1(x)
        x = self.conv2(x)
        if self.dropout:
            x = self.dropout2(x)
        if self.norm2:
            x = self.norm2(x)
        if self.residual:
            x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox.
                shape[1], x.shape[1]), :, :]
        x = self.actfun2(x)
        return x


class UnetNew(nn.Module):

    def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
        down='conv', up='tconv', activation='selu'):
        super(UnetNew, self).__init__()
        self.residual = residual
        if down == 'maxpool':
            self.down1 = nn.MaxPool2d(kernel_size=2)
            self.down2 = nn.MaxPool2d(kernel_size=2)
            self.down3 = nn.MaxPool2d(kernel_size=2)
            self.down4 = nn.MaxPool2d(kernel_size=2)
        elif down == 'avgpool':
            self.down1 = nn.AvgPool2d(kernel_size=2)
            self.down2 = nn.AvgPool2d(kernel_size=2)
            self.down3 = nn.AvgPool2d(kernel_size=2)
            self.down4 = nn.AvgPool2d(kernel_size=2)
        elif down == 'conv':
            self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
            self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
            self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
                groups=128)
            self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
                groups=256)
            self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
            self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
            self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
            self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
            self.down1.bias.data = 0.01 * self.down1.bias.data + 0
            self.down2.bias.data = 0.01 * self.down2.bias.data + 0
            self.down3.bias.data = 0.01 * self.down3.bias.data + 0
            self.down4.bias.data = 0.01 * self.down4.bias.data + 0
        if up == 'bilinear' or up == 'nearest':
            self.up1 = lambda x: nn.functional.interpolate(x, mode=up,
                scale_factor=2)
            self.up2 = lambda x: nn.functional.interpolate(x, mode=up,
                scale_factor=2)
            self.up3 = lambda x: nn.functional.interpolate(x, mode=up,
                scale_factor=2)
            self.up4 = lambda x: nn.functional.interpolate(x, mode=up,
                scale_factor=2)
        elif up == 'tconv':
            self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2,
                stride=2, groups=256)
            self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2,
                stride=2, groups=128)
            self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2,
                groups=64)
            self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2,
                groups=32)
            self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25
            self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25
            self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25
            self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25
            self.up1.bias.data = 0.01 * self.up1.bias.data + 0
            self.up2.bias.data = 0.01 * self.up2.bias.data + 0
            self.up3.bias.data = 0.01 * self.up3.bias.data + 0
            self.up4.bias.data = 0.01 * self.up4.bias.data + 0
        self.conv1 = ConvBlock(n_channel_in, 32, residual, activation)
        self.conv2 = ConvBlock(32, 64, residual, activation)
        self.conv3 = ConvBlock(64, 128, residual, activation)
        self.conv4 = ConvBlock(128, 256, residual, activation)
        self.conv5 = ConvBlock(256, 256, residual, activation)
        self.conv6 = ConvBlock(2 * 256, 128, residual, activation)
        self.conv7 = ConvBlock(2 * 128, 64, residual, activation)
        self.conv8 = ConvBlock(2 * 64, 32, residual, activation)
        self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation)
        if self.residual:
            self.convres = ConvBlock(n_channel_in, n_channel_out, residual,
                activation)

    def forward(self, input_0):
        primals_6 = self.down1.weight
        primals_3 = self.down1.bias
        primals_12 = self.down2.weight
        primals_9 = self.down2.bias
        primals_18 = self.down3.weight
        primals_15 = self.down3.bias
        primals_24 = self.down4.weight
        primals_21 = self.down4.bias
        primals_30 = self.up1.weight
        primals_23 = self.up1.bias
        primals_36 = self.up2.weight
        primals_17 = self.up2.bias
        primals_42 = self.up3.weight
        primals_11 = self.up3.bias
        primals_48 = self.up4.weight
        primals_5 = self.up4.bias
        primals_2 = self.conv1.conv1.weight
        primals_7 = self.conv1.conv1.bias
        primals_4 = self.conv1.conv2.weight
        primals_45 = self.conv1.conv2.bias
        primals_8 = self.conv2.conv1.weight
        primals_13 = self.conv2.conv1.bias
        primals_10 = self.conv2.conv2.weight
        primals_39 = self.conv2.conv2.bias
        primals_14 = self.conv3.conv1.weight
        primals_19 = self.conv3.conv1.bias
        primals_16 = self.conv3.conv2.weight
        primals_33 = self.conv3.conv2.bias
        primals_20 = self.conv4.conv1.weight
        primals_25 = self.conv4.conv1.bias
        primals_22 = self.conv4.conv2.weight
        primals_27 = self.conv4.conv2.bias
        primals_26 = self.conv5.conv1.weight
        primals_29 = self.conv5.conv1.bias
        primals_28 = self.conv5.conv2.weight
        primals_31 = self.conv5.conv2.bias
        primals_32 = self.conv6.conv1.weight
        primals_35 = self.conv6.conv1.bias
        primals_34 = self.conv6.conv2.weight
        primals_37 = self.conv6.conv2.bias
        primals_38 = self.conv7.conv1.weight
        primals_41 = self.conv7.conv1.bias
        primals_40 = self.conv7.conv2.weight
        primals_43 = self.conv7.conv2.bias
        primals_44 = self.conv8.conv1.weight
        primals_47 = self.conv8.conv1.bias
        primals_46 = self.conv8.conv2.weight
        primals_49 = self.conv8.conv2.bias
        primals_50 = self.conv9.conv1.weight
        primals_51 = self.conv9.conv1.bias
        primals_52 = self.conv9.conv2.weight
        primals_53 = self.conv9.conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33, primals_34,
            primals_35, primals_36, primals_37, primals_38, primals_39,
            primals_40, primals_41, primals_42, primals_43, primals_44,
            primals_45, primals_46, primals_47, primals_48, primals_49,
            primals_50, primals_51, primals_52, primals_53])
        return output[0]
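# Minimal smoke-test sketch (hypothetical, not part of the generated dump);
# it assumes a CUDA device and the inductor runtime imports above, since the
# compiled call() graph pins a float32 (4, 1, 64, 64) input via its
# assert_size_stride guards. Note that ConvBlock(n_channel_in, 32, residual,
# activation) binds `residual` and `activation` positionally to ConvBlock's
# `dropout` and `norm` parameters, so the blocks fall back to the default
# LeakyReLU (negative_slope=0.01) with no normalization -- exactly the
# behavior baked into the fused leaky_relu kernels above.
import torch

model = UnetNew(n_channel_in=1, n_channel_out=1).cuda()
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    y = model(x)
assert y.shape == (4, 1, 64, 64)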
repo_name: BoHuangLab/timeunet
module_name: Unet
synthetic: false
uuid: 17108
licenses: ["MIT"]
stars: 7
sha: 8fd34b18e9c4420db8172a402c243f7d03c853f1
repo_link: https://github.com/BoHuangLab/timeunet/tree/8fd34b18e9c4420db8172a402c243f7d03c853f1
entry_point: GatedConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/oi/coioicg7azjadgtusbhk6rfj4pa52tiwcwl4kcac6euqcgpchc5o.py # Topologically Sorted Source Nodes: [cat, x], Original ATen: [aten.cat, aten.elu] # Source node to ATen node mapping: # cat => cat # x => expm1, gt, mul, mul_2, where # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %neg], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%cat, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_cat_elu_0 = async_compile.triton('triton_poi_fused_cat_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = 0.0 tmp15 = tmp13 > tmp14 tmp16 = 1.0 tmp17 = tmp13 * tmp16 tmp18 = libdevice.expm1(tmp17) tmp19 = tmp18 * tmp16 tmp20 = tl.where(tmp15, tmp17, tmp19) tl.store(out_ptr0 + (x3), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gp/cgpntocah2dupccykbymecnhyoickwgaqnjqkl6xtd4bqe6hakvz.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul_3, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) triton_per_fused__weight_norm_interface_1 = async_compile.triton('triton_per_fused__weight_norm_interface_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 72 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (72*x0)), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (72*x0)), tmp9, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qj/cqjk7crtcmjzdz5rtwdsrykpcha5zig652x7ej3e2tscp6njhye2.py # Topologically Sorted Source Nodes: [cat_1, x_2], Original ATen: [aten.cat, aten.elu] # Source node to ATen node mapping: # cat_1 => cat_1 # x_2 => expm1_1, gt_1, mul_4, mul_6, where_1 # Graph fragment: # %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %neg_1], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%cat_1, 0), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_4,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_4, %mul_6), kwargs = {}) triton_poi_fused_cat_elu_2 = async_compile.triton('triton_poi_fused_cat_elu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_elu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_elu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 8, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr1 + ((-4) + x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = -tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp9, tmp18) tmp20 = 0.0 tmp21 = tmp19 > tmp20 tmp22 = 1.0 tmp23 = tmp19 * tmp22 tmp24 = libdevice.expm1(tmp23) tmp25 = tmp24 * tmp22 tmp26 = tl.where(tmp21, tmp23, tmp25) tl.store(out_ptr0 + (x3), tmp19, xmask) tl.store(out_ptr1 + (x3), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ni/cnitiv53ewjbxkhshepa3cax464h2cpgktpwij56tvjmya25s52n.py # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm_1 => div_1, mul_7, pow_3, pow_4, sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_5, %pow_4), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div_1), kwargs = {}) triton_per_fused__weight_norm_interface_3 = async_compile.triton('triton_per_fused__weight_norm_interface_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 8 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (8*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (8*x0)), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/bs/cbs5glpgepdmub4fs7d66b7yoslapsca7dnu3z6gu62cawrw5tak.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_4 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %mul_7, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/ci/cciqiay2tybvwtgpvdkrnyp4vdhwdjjbpoervm22pg4tvtynpxdj.py # Topologically Sorted Source Nodes: [sigmoid, x_5], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # x_5 => mul_8 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_5 = async_compile.triton('triton_poi_fused_mul_sigmoid_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (64 + x0 + (128*x1)), xmask) tmp2 = tl.load(in_ptr0 + (x0 + (128*x1)), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + (x2), tmp1, xmask) tl.store(out_ptr1 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (8, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_7, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat, x], Original ATen: [aten.cat, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_cat_elu_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf3 
= empty_strided_cuda((4, 8, 3, 3), (72, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_1.run(buf2, primals_3, primals_2, buf3, 4, 72, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1, x_2], Original ATen: [aten.cat, aten.elu] triton_poi_fused_cat_elu_2.run(buf4, primals_4, buf5, buf6, 512, grid=grid(512), stream=stream0) del primals_4 buf7 = empty_strided_cuda((8, 1, 1, 1), (1, 8, 8, 8), torch.float32) buf8 = reinterpret_tensor(buf7, (8, 1, 1, 1), (1, 1, 1, 1), 0); del buf7 # reuse buf9 = empty_strided_cuda((8, 8, 1, 1), (8, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_3.run(buf8, primals_6, primals_5, buf9, 8, 8, grid=grid(8), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf6, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 4, 4), (128, 16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf11, primals_7, 512, grid=grid(512), stream=stream0) del primals_7 buf12 = buf4; del buf4 # reuse buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_5], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_5.run(buf11, buf12, buf13, 256, grid=grid(256), stream=stream0) return (buf13, buf3, buf9, primals_2, primals_3, primals_5, primals_6, buf0, buf2, buf3, buf5, buf6, buf8, buf9, reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 16, 4, 1), 0), buf12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
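# A plain-PyTorch reference sketch (not part of the generated module) for what
# the fused _weight_norm_interface kernels above compute: the weight-norm
# reparameterization w = v * g / ||v||, with the L2 norm taken per output
# channel over dims (1, 2, 3), matching the pow -> sum -> sqrt -> div -> mul
# sequence in the graph fragments.
import torch

def weight_norm_reference(v, g):
    # Per-output-channel L2 norm; keepdim so g / norm broadcasts over v.
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()
    return v * (g / norm)

# e.g. the 3x3 conv uses v of shape (4, 8, 3, 3) and g of shape (4, 1, 1, 1),
# per the primals_3 / primals_2 stride asserts in call().
w = weight_norm_reference(torch.randn(4, 8, 3, 3), torch.randn(4, 1, 1, 1))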
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


def concat_elu(x):
    """Concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU."""
    return F.elu(torch.cat((x, -x), dim=1))


class WNConv2d(nn.Module):
    """Weight-normalized 2d convolution.

    Args:
        in_channels (int): Number of channels in the input.
        out_channels (int): Number of channels in the output.
        kernel_size (int): Side length of each convolutional kernel.
        padding (int): Padding to add on edges of input.
        bias (bool): Use bias in the convolution operation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding,
        bias=True):
        super(WNConv2d, self).__init__()
        self.conv = nn.utils.weight_norm(nn.Conv2d(in_channels,
            out_channels, kernel_size, padding=padding, bias=bias))

    def forward(self, x):
        x = self.conv(x)
        return x


class GatedConv(nn.Module):
    """Gated Convolution Block

    Originally used by PixelCNN++ (https://arxiv.org/pdf/1701.05517).

    Args:
        num_channels (int): Number of channels in hidden activations.
        drop_prob (float): Dropout probability.
        aux_channels (int): Number of channels in optional auxiliary input.
    """

    def __init__(self, num_channels, drop_prob=0.0, aux_channels=None):
        super(GatedConv, self).__init__()
        self.nlin = concat_elu
        self.conv = WNConv2d(2 * num_channels, num_channels, kernel_size=3,
            padding=1)
        self.drop = nn.Dropout2d(drop_prob)
        self.gate = WNConv2d(2 * num_channels, 2 * num_channels,
            kernel_size=1, padding=0)
        if aux_channels is not None:
            self.aux_conv = WNConv2d(2 * aux_channels, num_channels,
                kernel_size=1, padding=0)
        else:
            self.aux_conv = None

    def forward(self, x, aux=None):
        x = self.nlin(x)
        x = self.conv(x)
        if aux is not None:
            aux = self.nlin(aux)
            x = x + self.aux_conv(aux)
        x = self.nlin(x)
        x = self.drop(x)
        x = self.gate(x)
        a, b = x.chunk(2, dim=1)
        x = a * torch.sigmoid(b)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
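# Minimal eager-mode usage sketch for the reference module above, using the
# num_channels=4 / (4, 4, 4, 4) input that get_init_inputs() / get_inputs()
# supply; no CUDA or Triton is needed for this path.
import torch

block = GatedConv(num_channels=4)
out = block(torch.rand(4, 4, 4, 4))
# The gate halves the 2*C gate output back to C channels via chunk(2, dim=1).
assert out.shape == (4, 4, 4, 4)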
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = 0.0 tmp15 = tmp13 > tmp14 tmp16 = 1.0 tmp17 = tmp13 * tmp16 tmp18 = libdevice.expm1(tmp17) tmp19 = tmp18 * tmp16 tmp20 = tl.where(tmp15, tmp17, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 72 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 72 * x0), rmask & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(rmask & xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 72 * x0), tmp9, rmask & xmask) @triton.jit def triton_poi_fused_cat_elu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr1 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = -tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp9, tmp18) tmp20 = 0.0 tmp21 = tmp19 > tmp20 tmp22 = 1.0 tmp23 = tmp19 * tmp22 tmp24 = libdevice.expm1(tmp23) tmp25 = tmp24 * tmp22 tmp26 = tl.where(tmp21, tmp23, tmp25) tl.store(out_ptr0 + x3, tmp19, xmask) 
tl.store(out_ptr1 + x3, tmp26, xmask) @triton.jit def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 8 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask) tmp2 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (8, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_6, (8, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_elu_0[grid(512)](primals_1, buf0, 512, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 8, 3, 3), (72, 9, 3, 1), torch.float32) triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3, primals_2, buf3, 4, 72, XBLOCK=1, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_elu_2[grid(512)](buf4, primals_4, buf5, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf7 = empty_strided_cuda((8, 1, 1, 1), (1, 8, 8, 8), torch.float32) buf8 = reinterpret_tensor(buf7, (8, 1, 1, 1), (1, 1, 1, 1), 0) del buf7 buf9 = empty_strided_cuda((8, 8, 1, 1), (8, 1, 1, 1), torch.float32) 
triton_per_fused__weight_norm_interface_3[grid(8)](buf8, primals_6, primals_5, buf9, 8, 8, XBLOCK=8, num_warps=2, num_stages=1) buf10 = extern_kernels.convolution(buf6, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 4, 4), (128, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_4[grid(512)](buf11, primals_7, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf12 = buf4 del buf4 buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_5[grid(256)](buf11, buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) return (buf13, buf3, buf9, primals_2, primals_3, primals_5, primals_6, buf0, buf2, buf3, buf5, buf6, buf8, buf9, reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 16, 4, 1), 0), buf12) def concat_elu(x): """Concatenated ReLU (http://arxiv.org/abs/1603.05201), but with ELU.""" return F.elu(torch.cat((x, -x), dim=1)) class WNConv2d(nn.Module): """Weight-normalized 2d convolution. Args: in_channels (int): Number of channels in the input. out_channels (int): Number of channels in the output. kernel_size (int): Side length of each convolutional kernel. padding (int): Padding to add on edges of input. bias (bool): Use bias in the convolution operation. """ def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True): super(WNConv2d, self).__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)) def forward(self, x): x = self.conv(x) return x class GatedConvNew(nn.Module): """Gated Convolution Block Originally used by PixelCNN++ (https://arxiv.org/pdf/1701.05517). Args: num_channels (int): Number of channels in hidden activations. drop_prob (float): Dropout probability. aux_channels (int): Number of channels in optional auxiliary input. """ def __init__(self, num_channels, drop_prob=0.0, aux_channels=None): super(GatedConvNew, self).__init__() self.nlin = concat_elu self.conv = WNConv2d(2 * num_channels, num_channels, kernel_size=3, padding=1) self.drop = nn.Dropout2d(drop_prob) self.gate = WNConv2d(2 * num_channels, 2 * num_channels, kernel_size=1, padding=0) if aux_channels is not None: self.aux_conv = WNConv2d(2 * aux_channels, num_channels, kernel_size=1, padding=0) else: self.aux_conv = None def forward(self, input_0): primals_4 = self.conv.conv.bias primals_2 = self.conv.conv.weight_g primals_3 = self.conv.conv.weight_v primals_7 = self.gate.conv.bias primals_5 = self.gate.conv.weight_g primals_6 = self.gate.conv.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
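As an aside, the two `_weight_norm_interface` kernels above each materialize the weight-norm reparameterization w = g * v / ||v|| with one row reduction per output channel, storing both the norm and the normalized weight. A minimal eager restatement (the helper name is ours, and the 4-D magnitude shape is assumed from the asserted strides):

import torch

def weight_norm_ref(v, g):
    # v: (out_channels, ...) direction tensor; g: (out_channels, 1, 1, 1) magnitudes.
    norm = v.flatten(1).norm(dim=1).view(-1, 1, 1, 1)  # sqrt(sum(v * v)) per row
    return v * (g / norm), norm  # normalized weight plus the stored norm buffer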
Catherine0505/mar-scf-flow
GatedConv
false
17,109
[ "Apache-2.0" ]
10
aa7c3564cb9f2967c5e580a633516dba1b597f98
https://github.com/Catherine0505/mar-scf-flow/tree/aa7c3564cb9f2967c5e580a633516dba1b597f98
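Before the next record, a sketch of what the fused pipeline in `call()` above computes, reconstructed from the kernel graph: concat_elu, a 3x3 weight-norm conv, concat_elu again, a 1x1 gate conv, then sigmoid gating. Dropout is the identity at `drop_prob=0` and so is absent from the trace; the helper name `gated_conv_ref` is ours, not part of the record.

import torch
import torch.nn.functional as F

def gated_conv_ref(x, conv, gate):
    h = F.elu(torch.cat((x, -x), dim=1))  # triton_poi_fused_cat_elu_0: 4 -> 8 channels
    h = conv(h)                           # 3x3 weight-norm conv: 8 -> 4 channels
    h = F.elu(torch.cat((h, -h), dim=1))  # triton_poi_fused_cat_elu_2: 4 -> 8 channels
    h = gate(h)                           # 1x1 weight-norm conv: 8 -> 8 channels
    a, b = h.chunk(2, dim=1)              # value half and gate half
    return a * torch.sigmoid(b)           # triton_poi_fused_mul_sigmoid_5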
L1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yk/cyks2icyvnk2b4pd7r2kedzvq6f3vgtslw4ocvbbo7zqbogzbkib.py # Topologically Sorted Source Nodes: [sub, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => abs_1 # loss_1 => mean # loss_bbox => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
            avg_factor=None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
            reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_bbox = self.loss_weight * l1_loss(pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class L1LossNew(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1LossNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CityU-AIM-Group/HTD
L1Loss
false
17,110
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
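The compiled kernel in this record hard-codes only the default path (no weight, mean reduction, loss_weight=1.0); the decorator semantics it bypasses are easiest to see from the record's own doctest values, restated here as a runnable snippet against the `l1_loss` defined above:

import torch

pred = torch.Tensor([0, 2, 3])
target = torch.Tensor([1, 1, 1])
weight = torch.Tensor([1, 0, 1])

l1_loss(pred, target)                        # tensor(1.3333): plain mean
l1_loss(pred, target, weight)                # tensor(1.): weighted elementwise, then mean
l1_loss(pred, target, reduction='none')      # tensor([1., 1., 2.]): no reduction
l1_loss(pred, target, weight, avg_factor=2)  # tensor(1.5000): weighted sum / avg_factor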
MSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/7s/c7slhd2yhynjifzztjeslhh6lo7ff2yggulq3ttsnhaa7rsq2gjx.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_2], Original ATen: [aten.mse_loss, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => pow_1, sub # loss_1 => mean # loss_2 => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mean_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mean_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, loss_1, loss_2], Original ATen: [aten.mse_loss, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mean_mse_loss_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
            avg_factor=None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss."""
    return F.mse_loss(pred, target, reduction='none')


class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        loss = self.loss_weight * mse_loss(pred, target, weight, reduction=
            self.reduction, avg_factor=avg_factor)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import functools
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
            avg_factor=None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss."""
    return F.mse_loss(pred, target, reduction='none')


class MSELossNew(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
CityU-AIM-Group/HTD
MSELoss
false
17,111
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
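A quick parity check for this record: the fused kernel should agree with eager `F.mse_loss` under the defaults it specializes (mean reduction over 256 elements, `loss_weight=1.0`). This sketch assumes a CUDA device, since `call()` asserts one, and the tolerance is our choice:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
fused = MSELossNew()(x, y)                   # runs triton_per_fused_mean_mse_loss_mul_0
eager = F.mse_loss(x, y, reduction='mean')   # reference implementation
assert torch.allclose(fused, eager, atol=1e-6)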
HingeDiscriminatorLossCutMix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cs/ccszgvcjmwpkpxyy3tnzahjygq47kygvsdxf46r6ihjibfchfqsr.py # Topologically Sorted Source Nodes: [tensor, sub, minimum, mul, mean, loss_real, tensor_1, neg_1, sub_1, minimum_1, neg_2, add, mul_1, mean_1, loss_fake], Original ATen: [aten.lift_fresh, aten.sub, aten.minimum, aten.mul, aten.mean, aten.neg, aten.add] # Source node to ATen node mapping: # add => add # loss_fake => neg_3 # loss_real => neg # mean => mean # mean_1 => mean_1 # minimum => minimum # minimum_1 => minimum_1 # mul => mul # mul_1 => mul_1 # neg_1 => neg_1 # neg_2 => neg_2 # sub => sub # sub_1 => sub_1 # tensor => full_default # tensor_1 => full_default_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1.0), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %sub), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum, %arg1_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_1, 1.0), kwargs = {}) # %minimum_1 : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %sub_1), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_2, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum_1, %add), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {}) triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = -tmp0 tmp11 = tmp10 - tmp1 tmp12 = triton_helpers.minimum(tmp3, tmp11) tmp13 = -tmp5 tmp14 = tmp13 + tmp1 tmp15 = tmp12 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 256.0 tmp20 = tmp9 / tmp19 tmp21 = -tmp20 tmp22 = tmp18 / tmp19 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = 
buf0; del buf0 # reuse buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [tensor, sub, minimum, mul, mean, loss_real, tensor_1, neg_1, sub_1, minimum_1, neg_2, add, mul_1, mean_1, loss_fake], Original ATen: [aten.lift_fresh, aten.sub, aten.minimum, aten.mul, aten.mean, aten.neg, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0.run(buf2, buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from typing import Tuple
import torch.nn as nn


class HingeDiscriminatorLossCutMix(nn.Module):
    """
    This class implements the hinge gan loss for the discriminator network when utilizing cut mix augmentation.
    """

    def __init__(self) ->None:
        """
        Constructor method
        """
        super(HingeDiscriminatorLossCutMix, self).__init__()

    def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
        ) ->Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass. The loss parts are returned separately so that the whole backward graph
        does not have to be retained later.
        :param prediction: (torch.Tensor) Discriminator prediction
        :param label: (torch.Tensor) CutMix mask (1 for real regions, 0 for fake regions)
        :return: (Tuple[torch.Tensor, torch.Tensor]) Loss values for real and fake part
        """
        loss_real = -torch.mean(torch.minimum(torch.tensor(0.0, dtype=torch
            .float, device=prediction.device), prediction - 1.0) * label)
        loss_fake = -torch.mean(torch.minimum(torch.tensor(0.0, dtype=torch
            .float, device=prediction.device), -prediction - 1.0) * (-
            label + 1.0))
        return loss_real, loss_fake


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp3, tmp2) tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = -tmp0 tmp11 = tmp10 - tmp1 tmp12 = triton_helpers.minimum(tmp3, tmp11) tmp13 = -tmp5 tmp14 = tmp13 + tmp1 tmp15 = tmp12 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 256.0 tmp20 = tmp9 / tmp19 tmp21 = -tmp20 tmp22 = tmp18 / tmp19 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_lift_fresh_mean_minimum_mul_neg_sub_0[grid(1)]( buf2, buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, buf3 class HingeDiscriminatorLossCutMixNew(nn.Module): """ This class implements the hinge gan loss for the discriminator network when utilizing cut mix augmentation. """ def __init__(self) ->None: """ Constructor method """ super(HingeDiscriminatorLossCutMixNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
ChristophReich1996/Multi-StyleGAN
HingeDiscriminatorLossCutMix
false
17,112
[ "MIT" ]
7
988f2dfea85b3205126b40c61edfb28107eb3173
https://github.com/ChristophReich1996/Multi-StyleGAN/tree/988f2dfea85b3205126b40c61edfb28107eb3173
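The single persistent reduction in this record evaluates both hinge terms in one pass; restated in eager form (with `label` acting as the CutMix mask, 1 for real regions and 0 for fake ones; the function name is ours):

import torch

def hinge_d_loss_cutmix_ref(prediction, label):
    zero = torch.zeros((), device=prediction.device)
    loss_real = -torch.mean(torch.minimum(zero, prediction - 1.0) * label)
    loss_fake = -torch.mean(torch.minimum(zero, -prediction - 1.0) * (1.0 - label))
    return loss_real, loss_fake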
GlobalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/wz/cwzlgmghy6nxuchbiog4puo46i4tq7yhd3qu6ftkgjf3gwib6hxn.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ph/cph7lssg6ok5pn3rbmbprurourgd72q6n57v7b4qu2gqs3wcxbt7.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : 
[num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5n/c5nzoru25qhqsfq2ae2hsnhwroe2und6kjr6h7td35tqeg7qekch.py # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_h_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x3), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/sx/csxt2z4nihjsbxcckhpeiwgke5usifqk6ao5saydggkozmd2prcm.py # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # align_vectors_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0) del buf2 return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
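The two pointwise kernels in this record split softmax over the last axis of the (16, 4) score view into the usual numerically stable passes; a compact eager restatement (the function name is ours):

import torch

def softmax_two_pass(scores):
    # Pass 1 (triton_poi_fused__softmax_0): subtract the row max, exponentiate.
    e = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    # Pass 2 (triton_poi_fused__softmax_1): normalize by the row sum.
    return e / e.sum(dim=-1, keepdim=True)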
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
import torch.distributed


def aeq(*args):
    """
    Assert all arguments have the same value
    """
    arguments = (arg for arg in args)
    first = next(arguments)
    assert all(arg == first for arg in arguments
        ), 'Not all arguments have the same value: ' + str(args)


def sequence_mask(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.
    """
    batch_size = lengths.numel()
    max_len = max_len or lengths.max()
    return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
        lengths.unsqueeze(1))


class GlobalAttention(nn.Module):
    """
    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

    Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`, to an output
    of size `dim`.

    .. mermaid::

       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G

    All models compute the output as
    :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    They then apply a projection layer to [q, c].

    However they
    differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`\\text{score}(H_j,q) = H_j^T q`
       * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`

    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
       attn_func (str): attention function to use, options [softmax,sparsemax]
    """

    def __init__(self, dim, coverage=False, attn_type='dot', attn_func=
        'softmax'):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        assert attn_type in ['dot', 'general', 'mlp'
            ], 'Please select a valid attention type (got {:s}).'.format(
            attn_type)
        self.attn_type = attn_type
        assert attn_func in ['softmax', 'sparsemax'
            ], 'Please select a valid attention function.'
        self.attn_func = attn_func
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        out_bias = self.attn_type == 'mlp'
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``

        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if self.attn_type in ['general', 'dot']:
            if self.attn_type == 'general':
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            return torch.bmm(h_t, h_s_)
        else:
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            wquh = torch.tanh(wq + uh)
            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
        """
        Args:
          source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
          memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
          memory_lengths (LongTensor): the source context lengths ``(batch,)``
          coverage (FloatTensor): None (not supported yet)

        Returns:
          (FloatTensor, FloatTensor):

          * Computed vector ``(tgt_len, batch, dim)``
          * Attention distributions for each query
            ``(tgt_len, batch, src_len)``
        """
        if source.dim() == 2:
            one_step = True
            source = source.unsqueeze(1)
        else:
            one_step = False
        batch, source_l, dim = memory_bank.size()
        batch_, target_l, dim_ = source.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, source_l_ = coverage.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        if coverage is not None:
            cover = coverage.view(-1).unsqueeze(1)
            memory_bank += self.linear_cover(cover).view_as(memory_bank)
            memory_bank = torch.tanh(memory_bank)
        align = self.score(source, memory_bank)
        if memory_lengths is not None:
            mask = sequence_mask(memory_lengths, max_len=align.size(-1))
            mask = mask.unsqueeze(1)
            # `mask` is a bool tensor, so invert it with `~`; the original
            # `1 - mask` errors on bool tensors in recent PyTorch.
            align.masked_fill_(~mask, -float('inf'))
        if self.attn_func == 'softmax':
            align_vectors = F.softmax(align.view(batch * target_l, source_l
                ), -1)
        else:
            # NOTE: `sparsemax` is not defined in this snippet; the original
            # ONMT codebase provides it (onmt.modules.sparse_activations).
            align_vectors = sparsemax(align.view(batch * target_l, source_l
                ), -1)
        align_vectors = align_vectors.view(batch, target_l, source_l)
        c = torch.bmm(align_vectors, memory_bank)
        concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
        if self.attn_type in ['general', 'dot']:
            attn_h = torch.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, source_l_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            target_l_, batch_, dim_ = attn_h.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            target_l_, batch_, source_l_ = align_vectors.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        return attn_h, align_vectors


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x3, tmp1, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5 def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. 
""" batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class GlobalAttentionNew(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. .. mermaid:: graph BT A[Query] subgraph RNN C[H 1] D[H 2] E[H N] end F[Attn] G[Output] A --> F C --> F D --> F E --> F C -.-> G D -.-> G E -.-> G F --> G All models compute the output as :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where :math:`a_j` is the softmax of a score function. Then then apply a projection layer to [q, c]. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`\\text{score}(H_j,q) = H_j^T q` * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)` Args: dim (int): dimensionality of query and key coverage (bool): use coverage term attn_type (str): type of attention to use, options [dot,general,mlp] attn_func (str): attention function to use, options [softmax,sparsemax] """ def __init__(self, dim, coverage=False, attn_type='dot', attn_func= 'softmax'): super(GlobalAttentionNew, self).__init__() self.dim = dim assert attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type (got {:s}).'.format( attn_type) self.attn_type = attn_type assert attn_func in ['softmax', 'sparsemax' ], 'Please select a valid attention function.' self.attn_func = attn_func if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = nn.Linear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = nn.Linear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def score(self, h_t, h_s): """ Args: h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)`` h_s (FloatTensor): sequence of sources ``(batch, src_len, dim`` Returns: FloatTensor: raw attention scores (unnormalized) for each src index ``(batch, tgt_len, src_len)`` """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = torch.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input_0, input_1): primals_3 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
ChenRocks/Distill-BERT-Textgen-ONMT
GlobalAttention
false
17113
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
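A minimal smoke test for the GlobalAttention record above (an illustrative sketch, not part of the dataset row): it assumes GlobalAttention, GlobalAttentionNew, and the generated call from this record are in scope, and that a CUDA device is available, since the compiled path allocates CUDA buffers. With the output projection weight shared, the eager and Inductor-compiled forwards should agree to floating-point tolerance.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = GlobalAttention(dim=4).cuda()
    fused = GlobalAttentionNew(dim=4).cuda()
    fused.load_state_dict(eager.state_dict())  # share the linear_out weight
    source = torch.rand(4, 4, 4, device='cuda')
    memory_bank = torch.rand(4, 4, 4, device='cuda')
    attn_h, align = eager(source, memory_bank)
    attn_h_fused, align_fused = fused(source, memory_bank)
    # Both paths compute dot-product attention, the linear_out projection,
    # tanh, and a (batch, tgt_len) -> (tgt_len, batch) transpose.
    assert torch.allclose(attn_h, attn_h_fused, atol=1e-5)
    assert torch.allclose(align, align_fused, atol=1e-5)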
VarifocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lq/clq5jcil6pofa5w6btqnbvywzdhayyfa72iuxs6nlbghrrwbnwt7.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, gt, float_1, mul, pred_sigmoid, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.gt, aten._to_copy, aten.mul, aten.sigmoid, aten.sub, aten.abs, aten.pow, aten.le, aten.add, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # binary_cross_entropy_with_logits => abs_2, exp, full_default, log1p, minimum, mul_3, neg, sub_1, sub_2, sub_3 # float_1 => convert_element_type # float_2 => convert_element_type_1 # focal_weight => add # gt => gt # le => le # loss => mul_4 # loss_1 => mean # loss_cls => mul_5 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # pred_sigmoid => sigmoid # sub => sub # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %sub_2), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0.0), kwargs = {}) # %convert_element_type : [num_users=1] = 
call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.75), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg1_1, 0.0), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%le, torch.float32), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %convert_element_type_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %add), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0 = async_compile.triton('triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 
XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp0 > tmp5 tmp14 = tmp13.to(tl.float32) tmp15 = tmp0 * tmp14 tmp16 = tl.sigmoid(tmp3) tmp17 = tmp16 - tmp0 tmp18 = tl_math.abs(tmp17) tmp19 = tmp18 * tmp18 tmp20 = 0.75 tmp21 = tmp19 * tmp20 tmp22 = tmp0 <= tmp5 tmp23 = tmp22.to(tl.float32) tmp24 = tmp21 * tmp23 tmp25 = tmp15 + tmp24 tmp26 = tmp12 * tmp25 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = 256.0 tmp31 = tmp29 / tmp30 tmp32 = tmp31 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp32, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, gt, float_1, mul, pred_sigmoid, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.gt, aten._to_copy, aten.mul, aten.sigmoid, aten.sub, aten.abs, aten.pow, aten.le, aten.add, aten.mean] stream0 = get_raw_stream(0) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0,
    iou_weighted=True, reduction='mean', avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    assert pred.size() == target.size()
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    if iou_weighted:
        focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
             - target).abs().pow(gamma) * (target <= 0.0).float()
    else:
        focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
            ).abs().pow(gamma) * (target <= 0.0).float()
    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
        ) * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


class VarifocalLoss(nn.Module):

    def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
        iou_weighted=True, reduction='mean', loss_weight=1.0):
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

        Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part of
                Varifocal Loss, which is different from the alpha of Focal
                Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            loss_weight (float, optional): Weight of loss.
Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted= self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    tmp13 = tmp0 > tmp5
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp0 * tmp14
    tmp16 = tl.sigmoid(tmp3)
    tmp17 = tmp16 - tmp0
    tmp18 = tl_math.abs(tmp17)
    tmp19 = tmp18 * tmp18
    tmp20 = 0.75
    tmp21 = tmp19 * tmp20
    tmp22 = tmp0 <= tmp5
    tmp23 = tmp22.to(tl.float32)
    tmp24 = tmp21 * tmp23
    tmp25 = tmp15 + tmp24
    tmp26 = tmp12 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
    tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
    tmp30 = 256.0
    tmp31 = tmp29 / tmp30
    tmp32 = tmp31 * tmp1
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0[
            grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
""" if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class VarifocalLossNew(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLossNew, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CityU-AIM-Group/HTD
VarifocalLoss
false
17114
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
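An equivalence sketch for this record (illustrative only; it assumes the VarifocalLoss and VarifocalLossNew classes above are in scope and that CUDA is available, since the generated call allocates CUDA buffers). Note the fused kernel hard-codes the defaults alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', so agreement is only expected for those settings.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    pred = torch.rand(4, 4, 4, 4, device='cuda')    # raw logits
    target = torch.rand(4, 4, 4, 4, device='cuda')  # iou-aware targets
    eager_loss = VarifocalLoss()(pred, target)
    fused_loss = VarifocalLossNew()(pred, target)
    # Both compute mean(BCE_with_logits(pred, target) * focal_weight).
    assert torch.allclose(eager_loss, fused_loss, atol=1e-5)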
SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/y2/cy2kyw6nvxnpk42drczfgvzr6mi4sv4worqdnnuo5wic2q7bjptf.py # Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where, aten.mean] # Source node to ATen node mapping: # diff => abs_1 # loss => where # loss_1 => mean # loss_bbox => mul_2 # lt => lt # mul => mul # mul_1 => mul_1 # sub => sub # sub_1 => sub_1 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=4] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %abs_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_abs_div_lt_mean_mul_sub_where_0 = async_compile.triton('triton_per_fused_abs_div_lt_mean_mul_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 
3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_div_lt_mean_mul_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_div_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp6 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, diff, lt, mul, mul_1, truediv, sub_1, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.div, aten.where, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_div_lt_mean_mul_sub_where_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta
        )
    return loss


class SmoothL1Loss(nn.Module):
    """Smooth L1 loss.

    Args:
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None, **kwargs):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction.
Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_bbox = self.loss_weight * smooth_l1_loss(pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, ** kwargs) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_div_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = 0.5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 * tmp3
    tmp9 = tmp8 * tmp4
    tmp10 = tmp3 - tmp6
    tmp11 = tl.where(tmp5, tmp9, tmp10)
    tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
    tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
    tmp15 = 256.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp16 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_div_lt_mean_mul_sub_where_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
:Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss """ assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta ) return loss class SmoothL1LossNew(nn.Module): """Smooth L1 loss. Args: beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". Defaults to "mean". loss_weight (float, optional): The weight of loss. """ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1LossNew, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CityU-AIM-Group/HTD
SmoothL1Loss
false
17115
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
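A hand-checked example of the piecewise definition used by this record (illustrative only; it assumes the decorated smooth_l1_loss above is in scope and exercises just the eager CPU path): below the beta threshold the loss is quadratic, 0.5 * d**2 / beta, and above it linear, |d| - 0.5 * beta.

import torch

pred = torch.tensor([0.0, 0.5, 2.0])
target = torch.zeros(3)
loss = smooth_l1_loss(pred, target, beta=1.0, reduction='none')
# |d| = 0.0 and 0.5 hit the quadratic branch; |d| = 2.0 hits the linear one.
assert torch.allclose(loss, torch.tensor([0.0, 0.125, 1.5]))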
AppendDim
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/za/czalylhdb7li4us4ixachcuf325sgkr5nrug5yvvyracrm4tn6pb.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # x_1 => repeat # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 1, 1, 1, 1]), kwargs = {}) triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_repeat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class AppendDim(nn.Module): """ Append a new dim to states with size out_dim """ def __init__(self, out_dim=1): super().__init__() self.out_dim = out_dim def forward(self, states, **kwargs): x = states.unsqueeze(len(states.size())) x = x.repeat(*([1] * len(states.size()) + [self.out_dim])) return x def reset_parameters(self, *args, **kwargs): pass def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class AppendDimNew(nn.Module): """ Append a new dim to states with size out_dim """ def __init__(self, out_dim=1): super().__init__() self.out_dim = out_dim def reset_parameters(self, *args, **kwargs): pass def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
AppendDim
false
17116
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
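A minimal sanity-check sketch for the AppendDim record above (an illustration under stated assumptions, not part of the dataset row: it presumes a CUDA device and that AppendDim/AppendDimNew from this record are in scope):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
# Eager reference: what AppendDim.forward computes for the default out_dim=1.
eager = x.unsqueeze(x.dim()).repeat(1, 1, 1, 1, 1)
# Compiled path: AppendDimNew.forward dispatches to call(), which launches
# triton_poi_fused_repeat_0 (a plain 256-element copy, since repeating by 1
# along the new trailing dim duplicates nothing).
compiled = AppendDimNew()(x)
assert eager.shape == compiled.shape == (4, 4, 4, 4, 1)
assert torch.allclose(eager, compiled)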
ConvWS2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ql/cqlbm6dhwz4x3rm4obwdsb5du532wxmffa4fvmcbmcszaetrazn3.py # Topologically Sorted Source Nodes: [mean, std, sub, add, weight], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # std => sqrt, var # sub => sub # weight => div # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, 1e-05), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) triton_per_fused_add_div_mean_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_std_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp0 - tmp20 tmp25 = 1e-05 tmp26 = tmp23 + tmp25 tmp27 = tmp24 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp23, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vb/cvbno3dccglzmlbisnwicoai3aocrgweun3buh6avsdqdjjhjczh.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0); del buf0 # reuse buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0); del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, weight], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_std_sub_0.run(buf1, buf5, primals_1, buf6, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf8, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf8, primals_1, primals_3, buf1, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05): c_in = weight.size(0) weight_flat = weight.view(c_in, -1) mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) weight = (weight - mean) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) class ConvWS2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-05): super(ConvWS2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): return conv_ws_2d(x, self.weight, self.bias, self.stride, self. padding, self.dilation, self.groups, self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp0 - tmp20 tmp25 = 1e-05 tmp26 = tmp23 + tmp25 tmp27 = tmp24 / tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp23, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp27, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0) del buf0 buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_std_sub_0[grid(4)](buf1, buf5, primals_1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_1[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf1, buf5, buf6 def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05): c_in = weight.size(0) weight_flat = weight.view(c_in, -1) mean = weight_flat.mean(dim=1, 
keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) weight = (weight - mean) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) class ConvWS2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-05): super(ConvWS2dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CityU-AIM-Group/HTD
ConvWS2d
false
17117
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
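A short eager-mode sketch of the weight standardization that the fused triton_per_fused_add_div_mean_std_sub_0 kernel implements (assumes the conv_ws_2d definition from this record is in scope; it runs on CPU and exercises the reference path rather than call()):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
w = torch.rand(4, 4, 4, 4)   # (out_channels, in_channels, kH, kW)
x = torch.rand(4, 4, 4, 4)
b = torch.rand(4)

# Standardize each output filter over its 64 weights. std() uses Bessel's
# correction (divide by 63), matching tmp21 = 63.0 in the fused kernel.
flat = w.view(4, -1)
mean = flat.mean(dim=1).view(4, 1, 1, 1)
std = flat.std(dim=1).view(4, 1, 1, 1)
ref = F.conv2d(x, (w - mean) / (std + 1e-05), b)   # -> (4, 4, 1, 1)
assert torch.allclose(ref, conv_ws_2d(x, w, b))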
GHMR
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lm/clm6vke3oqtpngoervcg5h6lqqdn24enuvavfhwkxpu5y2mygm5k.py # Topologically Sorted Source Nodes: [sum_1, valid], Original ATen: [aten.sum, aten.gt] # Source node to ATen node mapping: # sum_1 => sum_1 # valid => gt # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg2_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg2_1, 0), kwargs = {}) triton_per_fused_gt_sum_0 = async_compile.triton('triton_per_fused_gt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_gt_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], 
True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 0.0 tmp5 = tmp0 > tmp4 tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, None) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ly/clytv6gd63vz5eacm5k2i7iv42st2zsveu7so7pabsecq7pxcglo.py # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss, mul_1, add_1, sqrt_1, truediv, abs_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.div, aten.abs] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # add_1 => add_1 # diff => sub # loss => sub_1 # mul => mul # mul_1 => mul_1 # sqrt => sqrt # sqrt_1 => sqrt_1 # truediv => div # Graph fragment: # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.0004), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sqrt, 0.02), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0.0004), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%div,), kwargs = {}) triton_poi_fused_abs_add_div_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_abs_add_div_mul_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_div_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.0004 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.02 tmp8 = tmp6 - tmp7 tmp9 = tmp2 / tmp6 tmp10 = tl_math.abs(tmp9) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qk/cqkk6qjpp7wkkifanoacaaodq46jzysz32setq5j6xupwtnynyhi.py # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.zeros_like] # Source node to ATen node mapping: # weights => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_like_2 = async_compile.triton('triton_poi_fused_zeros_like_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_like_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_like_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sum_1, valid], Original ATen: [aten.sum, aten.gt] stream0 = get_raw_stream(0) triton_per_fused_gt_sum_0.run(arg2_1, buf0, buf4, 1, 256, grid=grid(1), stream=stream0) del arg2_1 buf1 = empty_strided_cuda((4, 4, 4, 
4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss, mul_1, add_1, sqrt_1, truediv, abs_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.div, aten.abs] triton_poi_fused_abs_add_div_mul_sqrt_sub_1.run(arg0_1, arg1_1, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.zeros_like] triton_poi_fused_zeros_like_2.run(buf3, 256, grid=grid(256), stream=stream0) return (buf0, buf1, buf2, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GHMR(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. """ def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): super(GHMR, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1000.0 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight def forward(self, pred, target, label_weight, avg_factor=None): """Calculate the GHM-R loss. Args: pred (float tensor of size [batch_num, 4 (* class_num)]): The prediction of box regression layer. Channel number can be 4 or 4 * class_num depending on whether it is class-agnostic. target (float tensor of size [batch_num, 4 (* class_num)]): The target regression values with the same size of pred. label_weight (float tensor of size [batch_num, 4 (* class_num)]): The weight of each sample, 0 if ignored. Returns: The gradient harmonized loss. """ mu = self.mu edges = self.edges mmt = self.momentum diff = pred - target loss = torch.sqrt(diff * diff + mu * mu) - mu g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() weights = torch.zeros_like(g) valid = label_weight > 0 tot = max(label_weight.float().sum().item(), 1.0) n = 0 for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: n += 1 if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt ) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin if n > 0: weights /= n loss = loss * weights loss = loss.sum() / tot return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 0.0 tmp5 = tmp0 > tmp4 tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_poi_fused_abs_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.0004 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.02 tmp8 = tmp6 - tmp7 tmp9 = tmp2 / tmp6 tmp10 = tl_math.abs(tmp9) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_zeros_like_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_per_fused_gt_sum_0[grid(1)](arg2_1, buf0, buf4, 1, 256, num_warps=2, num_stages=1) del arg2_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_add_div_mul_sqrt_sub_1[grid(256)](arg0_1, arg1_1, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_zeros_like_2[grid(256)](buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, buf1, buf2, buf3, buf4 class GHMRNew(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. 
""" def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): super(GHMRNew, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1000.0 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CityU-AIM-Group/HTD
GHMR
false
17118
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
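The compiled graph above only produces the weight total, the elementwise loss, the gradient norm g, a zeroed weights buffer, and the valid mask; the data-dependent bin weighting stays in eager Python. A small worked example of that loop, with hypothetical g values and bins=10 as in GHMR.__init__:

import torch

g = torch.tensor([0.05, 0.15, 0.17, 0.95])   # hypothetical gradient norms
valid = torch.ones_like(g, dtype=torch.bool)
edges = torch.arange(11).float() / 10
edges[-1] = 1000.0
weights = torch.zeros_like(g)
tot, n = 4.0, 0
for i in range(10):
    inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
    num_in_bin = inds.sum().item()
    if num_in_bin > 0:
        n += 1
        weights[inds] = tot / num_in_bin   # sparse bins get larger weights
if n > 0:
    weights /= n
# weights == [4/3, 2/3, 2/3, 4/3]: the two samples sharing bin [0.1, 0.2)
# are down-weighted relative to the lone samples in bins 0 and 9.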
AlignDifferential
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/g5/cg5itricuk36n25hqnqam7zr6ev2maqtyaq226ioova22and6nsp.py # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] # Source node to ATen node mapping: # padded_states => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %arg0_1, %sub_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 6 x0 = xindex % 16 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 
& xmask, eviction_policy='evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tmp18 = tl.full([1], 6, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + (x3), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/as/casqoflfudx3no4d7bopywr472z2rfrdeczeevzpy37vvqhijbes.py # Topologically Sorted Source Nodes: [sub_2, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub_2 => sub_2 # truediv => div # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_10, %slice_12), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 2), kwargs = {}) triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (32 + x0 + (96*x1)), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (96*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_2, truediv], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tl.full([1], 6, tl.int64) tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + x3, tmp27, xmask) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (32 + x0 + 96 * x1), xmask) tmp1 = tl.load(in_ptr0 + (x0 + 96 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_sub_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, class AlignDifferentialNew(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
AlignDifferential
false
17119
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
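An eager-mode sketch of what the two fused kernels compute: triton_poi_fused_cat_0 builds the length-6 padded sequence by linear extrapolation at both ends, and triton_poi_fused_div_sub_1 takes the central difference (assumes the AlignDifferential definition from this record is in scope):

import torch

states = torch.rand(4, 4, 4, 4)                     # [batch, length, *]
front = states[:, 0:1] * 2 - states[:, 1:2]         # extrapolate before t=0
back = states[:, -1:] * 2 - states[:, -2:-1]        # extrapolate after t=L-1
padded = torch.cat([front, states, back], dim=1)    # [4, 6, 4, 4], cf. buf0
diff = (padded[:, 2:] - padded[:, :-2]) / 2         # [4, 4, 4, 4], cf. buf1
assert torch.allclose(diff, AlignDifferential()(states))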
C1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5q/c5qzas2nsvk2zno4s5leinexteup3nynu7nffm5kd7rytg7nduym.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 86400 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 6 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/d3/cd3zhrqqcohtfsdkjzdz3fopgxgkkj7x7mirxqixo5wkccbs334s.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 21600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x2 = (xindex // 900) x5 = xindex x4 = (xindex // 5400) x6 = xindex % 5400 tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (5504*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 86400, grid=grid(86400), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 21600, grid=grid(21600), stream=stream0) return (buf2, primals_1, primals_3, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from collections import OrderedDict class C1(nn.Module): def __init__(self): super(C1, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c1(img) return output def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
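A quick shape walk-through of the eager C1 module above helps when reading the compiled buffers in this record; this is an illustrative sketch, not taken from the record itself:

import torch

# Conv2d(1, 6, kernel_size=5) with no padding: 64 - 5 + 1 = 60 per side,
# matching the (4, 6, 60, 60) relu buffer; MaxPool2d(kernel_size=2, stride=2)
# then halves it: 60 // 2 = 30, matching the (4, 6, 30, 30) output buffer.
model = C1()
out = model(torch.rand([4, 1, 64, 64]))
assert out.shape == (4, 6, 30, 30)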
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 6 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 21600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 5400 x6 = xindex % 5400 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 5504 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2, buf1, 86400, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2, buf3, 21600, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C1New(nn.Module): def __init__(self): super(C1New, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', 
nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c1.c1.weight primals_2 = self.c1.c1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
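A hedged parity sketch for the compiled wrapper above, assuming a CUDA device (call() pins device 0) and an illustrative tolerance:

import torch

if torch.cuda.is_available():
    eager = C1().cuda()
    fused = C1New().cuda()
    fused.load_state_dict(eager.state_dict())  # share the conv weights
    x = torch.rand([4, 1, 64, 64], device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)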
ConstantinSeibold/SGL
C1
false
17120
[ "MIT" ]
7
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
GaussianFocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ux/cux3rdvmurnb775fshvmb35y2xr5glzkvviyrjmg7ygqg2dz3lsx.py # Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # log => log # log_1 => log_1 # loss => add_2 # loss_1 => mean # loss_reg => mul_4 # mul => mul # mul_2 => mul_2 # neg => neg # neg_1 => neg_1 # neg_loss => mul_3 # neg_weights => pow_1 # pos_loss => mul_1 # pos_weights => eq # pow_2 => pow_2 # pow_3 => pow_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-12), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_2), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %eq), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-12), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_1,), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %pow_3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr1 + (r0), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = -tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp0 tmp7 = tmp6 * tmp6 tmp8 = tmp4 * tmp7 tmp10 = tmp9 == tmp5 tmp11 = tmp10.to(tl.float32) tmp12 = tmp8 * tmp11 tmp13 = tmp6 + tmp1 tmp14 = tl_math.log(tmp13) tmp15 = -tmp14 tmp16 = tmp0 * tmp0 tmp17 = tmp15 * tmp16 tmp18 = tmp5 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp17 * tmp20 tmp22 = tmp12 + tmp21 tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = 256.0 tmp27 = tmp25 / tmp26 tmp28 = tmp27 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in a
    Gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in a Gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a Gaussian heatmap,
    not a 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss. """ def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0 ): super(GaussianFocalLoss, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction in gaussian distribution. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_reg = self.loss_weight * gaussian_focal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction, avg_factor=avg_factor) return loss_reg def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
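A small worked example of the elementwise formula in gaussian_focal_loss above; the values are illustrative, not from the record. Where the target is exactly 1 only the pos term fires, elsewhere only the neg term, down-weighted by (1 - target)**gamma:

import torch

pred = torch.tensor([0.9, 0.9])
target = torch.tensor([1.0, 0.5])        # one positive, one soft negative
loss = gaussian_focal_loss(pred, target, reduction='none')
eps = 1e-12
pos = -torch.log(pred[0] + eps) * (1 - pred[0]) ** 2           # target == 1
neg = -torch.log(1 - pred[1] + eps) * pred[1] ** 2 * 0.5 ** 4  # (1 - 0.5)**4
assert torch.allclose(loss, torch.stack([pos, neg]))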
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1e-12
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = -tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp0
    tmp7 = tmp6 * tmp6
    tmp8 = tmp4 * tmp7
    tmp10 = tmp9 == tmp5
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp8 * tmp11
    tmp13 = tmp6 + tmp1
    tmp14 = tl_math.log(tmp13)
    tmp15 = -tmp14
    tmp16 = tmp0 * tmp0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp5 - tmp9
    tmp19 = tmp18 * tmp18
    tmp20 = tmp19 * tmp19
    tmp21 = tmp17 * tmp20
    tmp22 = tmp12 + tmp21
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = 256.0
    tmp27 = tmp25 / tmp26
    tmp28 = tmp27 * tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in a
    Gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in a Gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLossNew(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a Gaussian heatmap,
    not a 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0
        ):
        super(GaussianFocalLossNew, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
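A hedged equivalence sketch (CUDA assumed): the fused kernel above hard-codes the traced defaults, i.e. alpha=2.0, gamma=4.0, loss_weight=1.0, mean reduction, and a 256-element input, so it should only be compared under those settings:

import torch

if torch.cuda.is_available():
    pred = torch.rand([4, 4, 4, 4], device='cuda')
    target = torch.rand([4, 4, 4, 4], device='cuda')
    eager = GaussianFocalLoss()(pred, target)
    fused = GaussianFocalLossNew()(pred, target)
    assert torch.allclose(eager, fused, atol=1e-6)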
CityU-AIM-Group/HTD
GaussianFocalLoss
false
17121
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
Differential
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/qh/cqh4qgvrgq3lyigdnaczrtgpxsmhr6kl2yjvxwferorxp54gsf4c.py # Topologically Sorted Source Nodes: [differentials], Original ATen: [aten.sub] # Source node to ATen node mapping: # differentials => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %slice_4), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [differentials], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Differential(nn.Module): def __init__(self, kernel_size=3, stride=1, padding=0): super().__init__() self.kernel_size = kernel_size self.stride = stride self.padding = padding def new_length(self, length): new_length = (length + self.padding * 2 - self.kernel_size + 1 ) // self.stride return new_length def forward(self, states): """ :param states: [batch, length, *] """ _batch, length, _n_agents, _state_dim = states.size() padding = self.padding kernel_size = self.kernel_size stride = self.stride if padding != 0: if padding > 0: states = torch.cat([states[:, :1].repeat(1, padding, 1, 1), states, states[:, -1:].repeat(1, padding, 1, 1)], dim=1) else: states = states[:, -padding:padding] new_length = (length + padding * 2 - kernel_size + 1) // stride differentials = states[:, 0:new_length * stride:stride] - states[:, kernel_size - 1:kernel_size - 1 + new_length * stride:stride] return differentials def show(self, name='Differential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = Differential(ks=%d, stride=%d, padding=%d)' % (name, self.kernel_size, self.stride, self.padding)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
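The (4, 2, 4, 4) output buffer in the compiled call() above follows from the new_length arithmetic with the default kernel_size=3, stride=1, padding=0 on the traced length-4 input: (4 + 2*0 - 3 + 1) // 1 = 2, so the subtraction pairs positions [0:2] against [2:4]. An illustrative check, not part of the record:

import torch

diff = Differential()                  # kernel_size=3, stride=1, padding=0
states = torch.rand([4, 4, 4, 4])
out = diff(states)
assert diff.new_length(4) == 2         # (4 + 2*0 - 3 + 1) // 1
assert out.shape == (4, 2, 4, 4)
assert torch.equal(out, states[:, 0:2] - states[:, 2:4])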
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class DifferentialNew(nn.Module): def __init__(self, kernel_size=3, stride=1, padding=0): super().__init__() self.kernel_size = kernel_size self.stride = stride self.padding = padding def new_length(self, length): new_length = (length + self.padding * 2 - self.kernel_size + 1 ) // self.stride return new_length def show(self, name='Differential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = Differential(ks=%d, stride=%d, padding=%d)' % (name, self.kernel_size, self.stride, self.padding)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
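A hedged parity sketch for DifferentialNew above (CUDA assumed; the Triton kernel was specialized for the traced [4, 4, 4, 4] shape and the default constructor arguments):

import torch

if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    assert torch.allclose(Differential()(x), DifferentialNew()(x))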
C-SUNSHINE/TOQ-Nets-PyTorch-Release
Differential
false
17122
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/no/cnon5ajue75qf7yiwbkruc77mekmrelvorvagtbhdc4kdbzwzdin.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/a5/ca56rgpdjbilaspdaau44lnrilsxekhmcnbwzhtrcgfbilkik27p.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mf/cmfwwg65sbylkho5zzcgwfl3cw2tru7uwmgab3xaed7bzoxeyhoe.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hq/chqepe2toejrjiisfzuyn6r5ltvr6s23demxgxfw67x2l2lu4e62.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf10, primals_1, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 return (buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MultiheadAttention(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection,
    and positional encoding used in DETR is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, dropout=0.0):
        super(MultiheadAttention, self).__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.dropout = dropout
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, key=None, value=None, residual=None, query_pos=
        None, key_pos=None, attn_mask=None, key_padding_mask=None):
        """Forward function for `MultiheadAttention`.

        Args:
            x (Tensor): The input query with shape [num_query, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
            key (Tensor): The key tensor with shape [num_key, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
                Default None. If None, the `query` will be used.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Default None.
                If None, the `key` will be used.
            residual (Tensor): The tensor used for addition, with the same
                shape as `x`. Default None. If None, `x` will be used.
            query_pos (Tensor): The positional encoding for query, with the
                same shape as `x`. Default None. If not None, it will be
                added to `x` before forward function.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Default None. If not None, it will be
                added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`.
            attn_mask (Tensor): ByteTensor mask with shape [num_query,
                num_key]. Same in `nn.MultiheadAttention.forward`.
                Default None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
                Same in `nn.MultiheadAttention.forward`. Default None.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        query = x
        if key is None:
            key = query
        if value is None:
            value = key
        if residual is None:
            residual = x
        if key_pos is None:
            if query_pos is not None and key is not None:
                if query_pos.shape == key.shape:
                    key_pos = query_pos
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        out = self.attn(query, key, value=value, attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]
        return residual + self.dropout(out)

    def __repr__(self):
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(embed_dims={self.embed_dims}, '
        repr_str += f'num_heads={self.num_heads}, '
        repr_str += f'dropout={self.dropout})'
        return repr_str


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'embed_dims': 4, 'num_heads': 4}]
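A minimal usage sketch for the wrapper above (shapes illustrative): with key, value, and residual left as None, the module runs self-attention over x and returns x + dropout(attn(x, x, x)):

import torch

mha = MultiheadAttention(embed_dims=4, num_heads=4)
x = torch.rand([4, 2, 4])          # [num_query=4, bs=2, embed_dims=4]
out = mha(x)                       # residual + dropout(self-attention)
assert out.shape == x.shape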
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf2)
        del primals_2
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
            4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
            1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
        del buf7
        extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
        ), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)


class MultiheadAttentionNew(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection,
    and positional encoding used in DETR is also passed as input.
Args: embed_dims (int): The embedding dimension. num_heads (int): Parallel attention heads. Same as `nn.MultiheadAttention`. dropout (float): A Dropout layer on attn_output_weights. Default 0.0. """ def __init__(self, embed_dims, num_heads, dropout=0.0): super(MultiheadAttentionNew, self).__init__() assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.' self.embed_dims = embed_dims self.num_heads = num_heads self.dropout = dropout self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout) self.dropout = nn.Dropout(dropout) def __repr__(self): """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(embed_dims={self.embed_dims}, ' repr_str += f'num_heads={self.num_heads}, ' repr_str += f'dropout={self.dropout})' return repr_str def forward(self, input_0): primals_2 = self.attn.in_proj_weight primals_3 = self.attn.in_proj_bias primals_1 = self.attn.out_proj.weight primals_5 = self.attn.out_proj.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
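The pair of softmax kernels in this record implements the standard numerically stable decomposition: one pass subtracts the per-row maximum and exponentiates, the next divides by the per-row sum. A minimal eager-mode sketch of that split (assuming the 4-wide rows the kernels unroll; variable names here are illustrative, not from the dump):

import torch

# Stable softmax in two steps, mirroring triton_poi_fused__softmax_1/_2:
# step 1 computes exp(x - rowmax), step 2 normalizes by the row sum.
x = torch.randn(4, 4, 4)
shifted = (x - x.amax(dim=-1, keepdim=True)).exp()   # kernel 1
probs = shifted / shifted.sum(dim=-1, keepdim=True)  # kernel 2
print(torch.allclose(probs, torch.softmax(x, dim=-1)))  # True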
CityU-AIM-Group/HTD
MultiheadAttention
false
17123
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
C2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/bb/cbbz6cdzgsgnfsb24xptqo36lh3vlzwjmnumh2abfj6oubfuykdv.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 16 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/23/c233r6jv2tfsmhx6tbdns2dazcnc5tu4bwkhwtzww6vfroz64zwg.py # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_3 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x2 = (xindex // 900) x5 = xindex x4 = (xindex // 14400) x6 = xindex % 14400 tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5), tmp6, xmask) tl.store(out_ptr1 + (x6 + (14464*x4)), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 6, 64, 64), (24576, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 230400, grid=grid(230400), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 57600, grid=grid(57600), stream=stream0) return (buf2, primals_1, primals_3, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 6, 64, 64), (24576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from collections import OrderedDict class C2(nn.Module): def __init__(self): super(C2, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16, kernel_size=(5, 5))), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c2(img) return output def get_inputs(): return [torch.rand([4, 6, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 16 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 14400 x6 = xindex % 14400 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 14464 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 6, 64, 64), (24576, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(230400)](buf0, primals_2, buf1, 230400, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2, buf3, 57600, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C2New(nn.Module): def __init__(self): super(C2New, self).__init__() self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16, kernel_size=(5, 5))), ('relu2', 
nn.ReLU()), ('s2', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c2.c2.weight primals_2 = self.c2.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
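The hard-coded element counts in the two C2 kernels follow directly from the module's shapes. A quick shape check, a sketch assuming the 4x6x64x64 input from get_inputs:

import torch
import torch.nn as nn

# A 5x5 valid conv maps 64x64 -> 60x60, so the fused conv+relu kernel covers
# 4*16*60*60 = 230400 elements; the 2x2/stride-2 pool maps 60x60 -> 30x30,
# i.e. 4*16*30*30 = 57600 elements for the max-pool kernel.
m = nn.Sequential(nn.Conv2d(6, 16, kernel_size=(5, 5)), nn.ReLU(),
                  nn.MaxPool2d(kernel_size=(2, 2), stride=2))
y = m(torch.rand(4, 6, 64, 64))
print(y.shape)                             # torch.Size([4, 16, 30, 30])
print(4 * 16 * 60 * 60, 4 * 16 * 30 * 30)  # 230400 57600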
ConstantinSeibold/SGL
C2
false
17124
[ "MIT" ]
7
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
Distance
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lh/clhkrtu5menof4ayw7i7l4d2bbo6bn65lvq2gf5vyphjubtce756.py # Topologically Sorted Source Nodes: [mul, sum_1, y], Original ATen: [aten.mul, aten.sum, aten.sqrt] # Source node to ATen node mapping: # mul => mul # sum_1 => sum_1 # y => sqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) triton_poi_fused_mul_sqrt_sum_0 = async_compile.triton('triton_poi_fused_mul_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tl.store(out_ptr0 + (x0), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1, y], Original ATen: [aten.mul, aten.sum, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_mul_sqrt_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
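apply_last_dim is the shape plumbing behind Length and Distance: the callable always sees a 2-D (-1, last_dim) view and is allowed to change the last dimension. A small usage sketch (the lambda is a hypothetical stand-in for Length's extractor):

import torch

def apply_last_dim(model, x):
    # Same helper as above: flatten to (-1, last_dim), apply, restore shape.
    size = list(x.size())
    y = model(x.contiguous().view(-1, size[-1]))
    size[-1] = y.size(-1)
    return y.view(torch.Size(size))

out = apply_last_dim(lambda t: t.norm(dim=1, keepdim=True),
                     torch.rand(4, 4, 4, 4))
print(out.shape)  # torch.Size([4, 4, 4, 1]) -- last dim collapsed to 1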
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tl.store(out_ptr0 + x0, tmp19, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0), def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class DistanceNew(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
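The fused kernel above unrolls the 4-element last dimension and evaluates sqrt(sum((a - b)^2)) per vector. An eager-mode reference for the same computation, a sketch assuming the 4x4x4x4 inputs from get_inputs:

import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
# Per-vector euclidean distance along the last axis; the (4, 4, 4, 1) result
# shape matches the reinterpret_tensor applied to buf0 in call().
ref = torch.sqrt(((a - b) ** 2).sum(dim=-1, keepdim=True))
print(ref.shape)  # torch.Size([4, 4, 4, 1])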
C-SUNSHINE/TOQ-Nets-PyTorch-Release
Distance
false
17125
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
SIMSE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/2m/c2mreqcmcjnouparpcno5pyrzfh4zgyet7jtd4t4hljnougd4ops.py # Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div] # Source node to ATen node mapping: # diffs => add # neg => neg # pow_1 => pow_1 # simse => div # sum_1 => sum_1 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, 65536), kwargs = {}) triton_per_fused_add_div_neg_pow_sum_0 = async_compile.triton('triton_per_fused_add_div_neg_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp6 * tmp6 tmp8 = 1.52587890625e-05 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, diffs, sum_1, pow_1, simse], Original ATen: [aten.neg, aten.add, aten.sum, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SIMSE(nn.Module): def __init__(self): super(SIMSE, self).__init__() def forward(self, pred, real): diffs = torch.add(real, -pred) n = torch.numel(diffs.data) simse = torch.sum(diffs).pow(2) / n ** 2 return simse def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp6 * tmp6 tmp8 = 1.52587890625e-05 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SIMSENew(nn.Module): def __init__(self): super(SIMSENew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
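The odd-looking literal 1.52587890625e-05 in the SIMSE kernel is the constant-folded 1/n^2: the 4x4x4x4 inputs have n = 256 elements, and 1/256**2 = 1/65536. A quick check, assuming the inputs from get_inputs:

import torch

pred, real = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
n = real.numel()                                # 256
simse = torch.sum(real - pred).pow(2) / n ** 2  # matches the eager SIMSE above
print(n ** 2, 1 / n ** 2)                       # 65536 1.52587890625e-05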
Columbine21/TFR-Net
SIMSE
false
17126
[ "MIT" ]
7
1da01577542e7f477fdf7323ec0696aebc632357
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
SoftSmall
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xh/cxh3f42u4u47h4ydfp4zfmfhjls56ruj7hceatggxky3e5m23q3n.py # Topologically Sorted Source Nodes: [sub, exp, truediv, sigmoid], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] # Source node to ATen node mapping: # exp => exp # sigmoid => sigmoid # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %primals_3), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %exp), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div,), kwargs = {}) triton_poi_fused_div_exp_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_exp_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp1 - tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = tmp3 / tmp6 tmp8 = tl.sigmoid(tmp7) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, exp, truediv, sigmoid], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0.run(primals_1, primals_3, primals_2, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftSmall(SoftCompare): """ Sigmoid((alpha - x) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def forward(self, x, beta=None): alpha = self.alpha if beta is None: beta = self.beta return self.sigmoid((alpha - x) / torch.exp(beta)) def show(self, name='SoftSmall', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name, alpha, math.exp(beta))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp1 - tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = tmp3 / tmp6 tmp8 = tl.sigmoid(tmp7) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0[grid(256)](primals_1, primals_3, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3, buf0 class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftSmallNew(SoftCompare): """ Sigmoid((alpha - x) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def show(self, name='SoftSmall', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name, alpha, math.exp(beta))) def forward(self, input_0): primals_1 = self.alpha primals_2 = self.beta primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
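SoftSmall is a soft "x < alpha" predicate: sigmoid((alpha - x) / exp(beta)), where exp(beta) keeps the temperature strictly positive for any real beta. A minimal eager sketch, assuming scalar alpha and beta as in SoftCompare (the zeros are illustrative values):

import torch

alpha, beta = torch.zeros(1), torch.zeros(1)  # hypothetical parameter values
x = torch.rand(4, 4, 4, 4)
y = torch.sigmoid((alpha - x) / torch.exp(beta))
# Outputs near 1 where x << alpha, near 0 where x >> alpha; beta controls how
# sharp the transition is (beta -> -inf approaches a hard comparison).
print(float(y.min()) >= 0.0 and float(y.max()) <= 1.0)  # True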
C-SUNSHINE/TOQ-Nets-PyTorch-Release
SoftSmall
false
17127
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
GHMC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/dl/cdlyk5xlew5u6kflrdfuy2ltsiein3sy5bqpnremr4w6ihwn7qo7.py # Topologically Sorted Source Nodes: [valid, float_3, sum_1], Original ATen: [aten.gt, aten._to_copy, aten.sum] # Source node to ATen node mapping: # float_3 => convert_element_type # sum_1 => sum_1 # valid => gt # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg2_1, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {}) triton_per_fused__to_copy_gt_sum_0 = async_compile.triton('triton_per_fused__to_copy_gt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_gt_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__to_copy_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp2, None) tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fe/cfempj53y7iushsw74t2d3prcg7vrrpc7tvbfgpocvs37fuwsi4l.py # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.zeros_like] # Source node to ATen node mapping: # weights => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zeros_like_1 = async_compile.triton('triton_poi_fused_zeros_like_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_like_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jx/cjxzbgns6sl3h62z2gtnem5gx537rjs3st577el57zgawktnazfb.py # Topologically Sorted Source Nodes: [sigmoid, sub, g], Original ATen: [aten.sigmoid, aten.sub, aten.abs] # Source node to ATen node mapping: # g => abs_1 # sigmoid => sigmoid # sub => sub # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = 
(%sub,), kwargs = {}) triton_poi_fused_abs_sigmoid_sub_2 = async_compile.triton('triton_poi_fused_abs_sigmoid_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_sigmoid_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_sigmoid_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.abs(tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [valid, float_3, sum_1], Original ATen: [aten.gt, aten._to_copy, aten.sum] stream0 = get_raw_stream(0) triton_per_fused__to_copy_gt_sum_0.run(arg2_1, buf0, buf1, 1, 256, grid=grid(1), stream=stream0) del arg2_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weights], Original ATen: [aten.zeros_like] triton_poi_fused_zeros_like_1.run(buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, sub, g], Original ATen: [aten.sigmoid, aten.sub, aten.abs] triton_poi_fused_abs_sigmoid_sub_2.run(arg0_1, arg1_1, buf3, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf1, arg1_1, buf2, buf3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero((labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-06 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, pred, target, label_weight, *args, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. Returns: The gradient harmonized loss. """ if pred.dim() != target.dim(): target, label_weight = _expand_onehot_labels(target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt ) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
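The core of GHM-C is the density weighting inside the loop: samples falling into a crowded gradient-magnitude bin share that bin's budget of tot, and the final division by n gives every non-empty bin equal total influence. A toy 1-D illustration with hypothetical values (momentum disabled):

import torch

g = torch.tensor([0.05, 0.06, 0.07, 0.95])  # three easy samples, one hard
edges = torch.arange(11).float() / 10
weights = torch.zeros_like(g)
tot, n = float(g.numel()), 0
for i in range(10):
    inds = (g >= edges[i]) & (g < edges[i + 1])
    num_in_bin = int(inds.sum())
    if num_in_bin > 0:
        weights[inds] = tot / num_in_bin  # crowded bin -> smaller per-sample weight
        n += 1
weights = weights / n
print(weights)  # tensor([0.6667, 0.6667, 0.6667, 2.0000])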
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_abs_sigmoid_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.abs(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_gt_sum_0[grid(1)](arg2_1, buf0, buf1, 1, 256, num_warps=2, num_stages=1) del arg2_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_zeros_like_1[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_sigmoid_sub_2[grid(256)](arg0_1, arg1_1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf1, arg1_1, buf2, buf3, buf0 def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero((labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights class GHMCNew(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. 
""" def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMCNew, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-06 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CityU-AIM-Group/HTD
GHMC
false
17,128
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
Inequality
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/uj/cujnfo3ca43wq6vkyrhrwpgidlxzbqnknyct5inf64op233r3bdh.py # Topologically Sorted Source Nodes: [sub, truediv, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 1.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div,), kwargs = {}) triton_poi_fused_div_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tl.sigmoid(tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_div_sigmoid_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tl.sigmoid(tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_div_sigmoid_sub_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf0 class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class InequalityNew(nn.Module): def __init__(self, 
out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions

    def forward(self, input_0):
        primals_2 = self.thresholds
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
Inequality
false
17,129
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
MinPoolTrinary
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/gz/cgzue5k7nydthhmrmpvmetbkq5weu3owbtvxljcrtk77ydgij6ov.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 3, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp9, tmp14, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x3), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class MinPoolTrinary(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ assert states.size(1) >= 3 side_length = (states.size(1) + 1) // 3 return torch.cat([torch.min(states[:, :side_length], dim=1, keepdim =True)[0], torch.min(states[:, side_length:-side_length], dim=1, keepdim=True)[0], torch.min(states[:, -side_length:], dim=1, keepdim=True)[0]], dim=1) def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
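# The trinary split in MinPoolTrinary above is easiest to see with a concrete
# length: for length 5, side_length = (5 + 1) // 3 = 2, so the three
# min-pooled segments along dim 1 are [:2], [2:-2] and [-2:]. A small CPU
# sketch with illustrative shapes:
import torch

pool = MinPoolTrinary()
states = torch.rand(2, 5, 3)   # [batch, length, features]
out = pool(states)             # three keepdim min-reductions concatenated on dim 1
assert out.shape == (2, 3, 3)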
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 3, tl.int64) tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp9, tmp14, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class MinPoolTrinaryNew(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
MinPoolTrinary
false
17,130
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
MaxPoolTrinary
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zz/czzsdhkvjcngkxqqrb3g2dmp6yzosharmg6mnhgj4huqq2ymv2ag.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 3, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp9, tmp14, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x3), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class MaxPoolTrinary(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ assert states.size(1) >= 3 side_length = (states.size(1) + 1) // 3 return torch.cat([torch.max(states[:, :side_length], dim=1, keepdim =True)[0], torch.max(states[:, side_length:-side_length], dim=1, keepdim=True)[0], torch.max(states[:, -side_length:], dim=1, keepdim=True)[0]], dim=1) def show(self, name='MaxPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MaxPoolTrinary()' % (name,)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 3, tl.int64) tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp9, tmp14, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class MaxPoolTrinaryNew(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def show(self, name='MaxPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MaxPoolTrinary()' % (name,)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
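# A minimal equivalence sketch for this record, assuming a CUDA device is
# available (the generated call() asserts a contiguous [4, 4, 4, 4] float
# input). MaxPoolTrinary is the eager reference defined above and
# MaxPoolTrinaryNew wraps the fused cat/max kernel.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = MaxPoolTrinary()(x)
compiled = MaxPoolTrinaryNew()(x)
assert torch.allclose(eager, compiled)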
C-SUNSHINE/TOQ-Nets-PyTorch-Release
MaxPoolTrinary
false
17,131
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
BalancedL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/od/codgjmibjthg5ee4tydbaiox7xzv2h2updcf2iabc7mzp45byskm.py # Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # diff => abs_1 # log => log # loss => where # loss_1 => mean # loss_bbox => mul_6 # lt => lt # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=5] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.02619784824562798), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 1.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%abs_1, 1.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 0.07859354473688394), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %sub_1, %sub_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 19.085536923187664 tmp7 = tmp3 * tmp6 tmp8 = tmp7 + tmp4 tmp9 = 0.02619784824562798 tmp10 = tmp8 * tmp9 tmp11 = tmp7 * tmp4 tmp12 = tmp11 + tmp4 tmp13 = tl_math.log(tmp12) tmp14 = tmp10 * tmp13 tmp15 = 0.5 tmp16 = tmp3 * tmp15 tmp17 = tmp14 - tmp16 tmp18 = 1.5 tmp19 = tmp3 * tmp18 tmp20 = 0.07859354473688394 tmp21 = tmp19 + tmp20 tmp22 = tmp21 - tmp15 tmp23 = tl.where(tmp5, tmp17, tmp22) tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = 256.0 tmp28 = tmp26 / tmp27 tmp29 = tmp28 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None,
    reduction='mean', avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
    reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and
            target and ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    b = np.e ** (gamma / alpha) - 1
    loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
        b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
        alpha * beta)
    return loss


class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Args: alpha (float): The denominator ``alpha`` in the balanced L1 loss. Defaults to 0.5. gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. beta (float, optional): The loss is a piecewise function of prediction and target. ``beta`` serves as a threshold for the difference between the prediction and target. Defaults to 1.0. reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1Loss, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function of loss. Args: pred (torch.Tensor): The prediction with shape (N, 4). target (torch.Tensor): The learning target of the prediction with shape (N, 4). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_bbox = self.loss_weight * balanced_l1_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
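# The magic constants in the fused kernel of this record follow from the
# defaults alpha=0.5, gamma=1.5, beta=1.0 used by balanced_l1_loss above; a
# quick plain-Python check (no CUDA needed):
import math

alpha, gamma, beta = 0.5, 1.5, 1.0
b = math.e ** (gamma / alpha) - 1   # ~19.085536923187664, the factor b = e**3 - 1
print(alpha / b)                    # ~0.02619784824562798, coefficient of the log branch
print(gamma / b)                    # ~0.07859354473688394, offset of the linear branch
# The two branches of the piecewise loss meet continuously at diff == beta:
left = alpha / b * (b * beta + 1) * math.log(b * beta / beta + 1) - alpha * beta
right = gamma * beta + gamma / b - alpha * beta
assert abs(left - right) < 1e-09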
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = 19.085536923187664
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 + tmp4
    tmp9 = 0.02619784824562798
    tmp10 = tmp8 * tmp9
    tmp11 = tmp7 * tmp4
    tmp12 = tmp11 + tmp4
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp10 * tmp13
    tmp15 = 0.5
    tmp16 = tmp3 * tmp15
    tmp17 = tmp14 - tmp16
    tmp18 = 1.5
    tmp19 = tmp3 * tmp18
    tmp20 = 0.07859354473688394
    tmp21 = tmp19 + tmp20
    tmp22 = tmp21 - tmp15
    tmp23 = tl.where(tmp5, tmp17, tmp22)
    tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
    tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
    tmp27 = 256.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp28 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function.
The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): """Calculate balanced L1 loss. Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_ Args: pred (torch.Tensor): The prediction with shape (N, 4). target (torch.Tensor): The learning target of the prediction with shape (N, 4). beta (float): The loss is a piecewise function of prediction and target and ``beta`` serves as a threshold for the difference between the prediction and target. Defaults to 1.0. alpha (float): The denominator ``alpha`` in the balanced L1 loss. Defaults to 0.5. gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) b = np.e ** (gamma / alpha) - 1 loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log( b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b - alpha * beta) return loss class BalancedL1LossNew(nn.Module): """Balanced L1 Loss. arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Args: alpha (float): The denominator ``alpha`` in the balanced L1 loss. Defaults to 0.5. gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. beta (float, optional): The loss is a piecewise function of prediction and target. ``beta`` serves as a threshold for the difference between the prediction and target. Defaults to 1.0. reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1LossNew, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CityU-AIM-Group/HTD
BalancedL1Loss
false
17,132
[ "MIT" ]
5
0be9fd844118c275abc6053b3cbd5ffb589e62ee
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
C3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/3t/c3t6ghdt4h2cj6ra3352ryvylmpct6qllvddssf556f4jm5sj2y6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1920 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (16*x2) + (400*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel 
path: runs/run_shard_2/inductor_cache/4s/c4sadsdjxqofsbvu6ka7fxmlklpifbdshg3lthvhcbzzutqmox3h.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (16*x2) + (65536*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cv/ccvdn3clbxkqaer37mef6wvz5gw22kfs7i7eozau45wdjmq7esyf.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[512, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 480 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 120 y1 = (yindex // 120) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (120*x2) + (432000*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (3600*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (120*x2) + (432000*y1)), tmp6, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (120, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_2, (120, ), (1, )) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((120, 16, 5, 5), (400, 1, 80, 16), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1920, 25, grid=grid(1920, 25), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 64, 4096, grid=grid(64, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 120, 60, 60), (432000, 1, 7200, 120)) buf3 = empty_strided_cuda((4, 120, 60, 60), (432000, 3600, 60, 1), torch.float32) buf4 = empty_strided_cuda((4, 120, 60, 60), (432000, 1, 7200, 120), torch.bool) # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf2, primals_2, buf3, buf4, 480, 3600, 
grid=grid(480, 3600), stream=stream0) del buf2 del primals_2 return (buf3, buf0, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((120, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16, 64, 64), (65536, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
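A side note on the fused ReLU kernel above (standard Inductor behaviour for a '0_forward' graph, stated here as an assumption rather than taken from the row itself): besides the activation, triton_poi_fused_convolution_relu_threshold_backward_2 also materializes the boolean mask relu(x) <= 0 that aten.threshold_backward consumes when gradients flow back through the ReLU. A minimal eager sketch of the two stored buffers:

import torch

# What the fused kernel stores per element: the forward activation (buf3)
# and the mask of positions the backward pass will zero out (buf4).
y = torch.randn(4, 120, 60, 60)
relu_out = torch.relu(y)      # buf3: output of conv + ReLU
grad_mask = relu_out <= 0     # buf4: saved for aten.threshold_backward
assert grad_mask.dtype == torch.bool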
import torch import torch.nn as nn from collections import OrderedDict class C3(nn.Module): def __init__(self): super(C3, self).__init__() self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120, kernel_size=(5, 5))), ('relu3', nn.ReLU())])) def forward(self, img): output = self.c3(img) return output def get_inputs(): return [torch.rand([4, 16, 64, 64])] def get_init_inputs(): return [[], {}]
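As a quick smoke test (a sketch, not part of the dataset row), the eager C3 module above can be exercised with the shape from get_inputs(); the 60x60 output follows from the 5x5 valid convolution (64 - 5 + 1 = 60) and matches the buf2 size asserted in the compiled call().

import torch

# Hypothetical usage of the C3 module defined above (runs on CPU too).
model = C3()
img = torch.rand(4, 16, 64, 64)        # same shape as get_inputs()
out = model(img)
assert out.shape == (4, 120, 60, 60)   # Conv2d(16, 120, k=5), no padding
assert (out >= 0).all()                # ReLU output is non-negative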
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 1920 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 480 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 120 y1 = yindex // 120 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 120 * x2 + 432000 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 3600 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 120 * x2 + 432000 * y1), tmp6, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (120, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_2, (120,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((120, 16, 5, 5), (400, 1, 80, 16), torch. 
float32) get_raw_stream(0) triton_poi_fused_0[grid(1920, 25)](primals_1, buf0, 1920, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16), torch.float32) triton_poi_fused_1[grid(64, 4096)](primals_3, buf1, 64, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 120, 60, 60), (432000, 1, 7200, 120)) buf3 = empty_strided_cuda((4, 120, 60, 60), (432000, 3600, 60, 1), torch.float32) buf4 = empty_strided_cuda((4, 120, 60, 60), (432000, 1, 7200, 120), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(480, 3600) ](buf2, primals_2, buf3, buf4, 480, 3600, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 del primals_2 return buf3, buf0, buf1, buf4 class C3New(nn.Module): def __init__(self): super(C3New, self).__init__() self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120, kernel_size=(5, 5))), ('relu3', nn.ReLU())])) def forward(self, input_0): primals_1 = self.c3.c3.weight primals_2 = self.c3.c3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
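For orientation (an aside based on general Inductor behaviour, not on anything stated in the row): the two prologue kernels triton_poi_fused_0 and triton_poi_fused_1 only repack the weight and input into channels-last strides before the extern convolution runs; the values are untouched. An eager analogue:

import torch

# Rough eager equivalent of the layout change done by the prologue kernels:
# NCHW tensors rewritten with channels-last strides, same values.
w = torch.rand(120, 16, 5, 5)
w_cl = w.contiguous(memory_format=torch.channels_last)
assert torch.equal(w, w_cl)
print(w.stride())     # (400, 25, 5, 1)  - as asserted for primals_1
print(w_cl.stride())  # (400, 1, 80, 16) - the strides given to buf0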
ConstantinSeibold/SGL
C3
false
17133
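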
[ "MIT" ]
7
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
Subsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zj/czjh4ovfm74jppupklcmchvu7zn7gtiwxbb5i6etfuzlbqatnyxc.py # Topologically Sorted Source Nodes: [lengths], Original ATen: [aten.floor_divide] # Source node to ATen node mapping: # lengths => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor_mode](args = (%arg1_1, 2), kwargs = {rounding_mode: floor}) triton_poi_fused_floor_divide_0 = async_compile.triton('triton_poi_fused_floor_divide_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_floor_divide_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_floor_divide_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lengths], Original ATen: [aten.floor_divide] stream0 = get_raw_stream(0) triton_poi_fused_floor_divide_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 return (reinterpret_tensor(arg0_1, (4, 2, 4, 4), (64, 32, 4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
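One detail worth spelling out (my reading of the kernel, offered as a note): triton_poi_fused_floor_divide_0 lowers the float floor-division lengths // 2 to a multiply by 0.5 followed by libdevice.floor, which is exactly equivalent for float tensors:

import torch

# floor(x * 0.5) == x // 2 for float tensors, which is all the kernel does.
x = torch.tensor([3.0, 4.0, 5.0, 6.0])
assert torch.equal(torch.floor(x * 0.5), x // 2)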
import torch import torch.utils.data import torch.nn as nn class Subsample(nn.Module): def __init__(self): super().__init__() def forward(self, feats, lengths): out = feats[:, ::2] lengths = lengths // 2 return out, lengths def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
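A sanity check of the eager module above (a sketch, not from the repository): feats[:, ::2] is a strided view, so the compiled call() can return a reinterpret_tensor with the dim-1 stride doubled instead of copying, and only the lengths need a kernel.

import torch

# Illustrative use of the Subsample logic with a 1-D lengths tensor.
feats = torch.rand(4, 4, 4, 4)
lengths = torch.tensor([3.0, 4.0, 5.0, 6.0])
out = feats[:, ::2]                    # view: stride along dim 1 doubles
assert out.shape == (4, 2, 4, 4)
assert torch.equal(lengths // 2, torch.tensor([1.0, 2.0, 2.0, 3.0]))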
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_floor_divide_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_floor_divide_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 return reinterpret_tensor(arg0_1, (4, 2, 4, 4), (64, 32, 4, 1), 0), buf0 class SubsampleNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
CoraJung/flexible-input-slu
Subsample
false
17134
[ "Apache-2.0" ]
7
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/nw/cnw3kd6ke5ounvjbd2m3tal67rbbd4a7p6lfs3x7gity7k6mvlwt.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # q_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_2/inductor_cache/eo/ceow5omvhb2nzxwebieopiqyw6by3ra4roqsyt5ffvei2tf24t55.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/su/csu74p3rcw6tnytcg5xyrsowvm6h3xmdgnitt2y4frvm3vhk6ght.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = 
{memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ww/cwwpcb3yl6udrwwu55elkl44rncefeidgsmfifkkwc77gnpr3q6l.py # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # attn_weights_4 => div_1 # sum_1 => sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_12, [1]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {}) triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': 
[], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4) buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0) del buf4 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: 
[aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
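A note on the last kernel above (an illustrative reading): triton_poi_fused_div_sum_3 averages the attention maps over the 4 heads by summing four 64-element slices and scaling by 0.25, i.e. the attn_weights.sum(dim=1) / self.num_heads step of the eager module that follows.

import torch

# Eager analogue of triton_poi_fused_div_sum_3: mean over the head dim.
w = torch.rand(4, 4, 4, 16)            # (bsz, num_heads, tgt_len, src_len)
avg = w.sum(dim=1) / 4
assert torch.allclose(avg, w.mean(dim=1))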
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward(self, query, key, value, attn_mask=None): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Timesteps can be masked by supplying a T x T mask in the `attn_mask` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if qkv_same: q, k, v = self.in_proj_qkv(query) elif kv_same: q = self.in_proj_q(query) if key is None: assert value is None k = v = None else: k, v = self.in_proj_kv(key) else: q = self.in_proj_q(query) k = self.in_proj_k(key) v = self.in_proj_v(value) q = self.scaling * q if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) src_len = k.size(1) if self.add_zero_attn: src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: try: attn_weights += attn_mask.unsqueeze(0) except: None None assert False attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as( attn_weights) attn_weights = F.dropout(attn_weights, p=self.attn_dropout, training=self.training) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.sum(dim=1) / self.num_heads return attn, attn_weights def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4) buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf4 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) 
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0) class MultiheadAttentionNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def forward(self, input_0, input_1, input_2): primals_4 = self.in_proj_weight primals_5 = self.in_proj_bias primals_6 = self.out_proj.weight primals_7 = self.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
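The persistent-reduction kernel triton_per_fused__softmax_1 used above is the usual numerically stable softmax: subtract the row max before exponentiating so exp() cannot overflow, then normalize. An eager equivalent, for comparison only:

import torch

# Max-subtracted softmax over the last dim, as fused by Inductor.
x = torch.rand(16, 4, 16)
m = x.max(dim=-1, keepdim=True).values
e = (x - m).exp()
probs = e / e.sum(dim=-1, keepdim=True)
assert torch.allclose(probs, torch.softmax(x, dim=-1))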
Columbine21/TFR-Net
MultiheadAttention
false
17135
[ "MIT" ]
7
1da01577542e7f477fdf7323ec0696aebc632357
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/en/cen5gululiumhv4etqz6ru3yfe5xtij7x3m5gqo4zsnlyjrns3qt.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # norm => add # out => mul # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init class L2Norm(nn.Module): def __init__(self, n_channels, scale): super(L2Norm, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps x = torch.div(x, norm) out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x ) * x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'scale': 1.0}]
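A hand check of the module above (a sketch assuming the L2Norm class just defined is in scope): each spatial position is divided by the L2 norm of its channel vector plus eps, then scaled per channel by weight; with scale=1.0 the weight is all ones, so the output equals the plain normalization.

import torch

# out[n, c, h, w] = weight[c] * x[n, c, h, w] / (||x[n, :, h, w]||_2 + eps)
m = L2Norm(n_channels=4, scale=1.0)
x = torch.rand(4, 4, 4, 4)
ref = x / (x.pow(2).sum(dim=1, keepdim=True).sqrt() + 1e-10)
assert torch.allclose(m(x), ref)       # weight == 1 after reset_parameters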
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt as sqrt from itertools import product as product import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Coral-SH/TextBoxes_PyTorch
L2Norm
false
17,136
[ "MIT" ]
8
fb1636139d69e762b567a234c3a4b69e3dd43071
https://github.com/Coral-SH/TextBoxes_PyTorch/tree/fb1636139d69e762b567a234c3a4b69e3dd43071
Ternary
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/67/c67mp6ntuv2cv5putllufmxq7gw46sh3lgr2awwj2wgcun6nx45e.py
# Topologically Sorted Source Nodes: [setitem, setitem_1, setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
#   setitem => full_default, index_put
#   setitem_1 => full_default_1, index_put_1
#   setitem_2 => full_default_2, index_put_2
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
#   %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%arg0_1, [%lt], %full_default), kwargs = {})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
#   %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%ge], %full_default_1), kwargs = {})
#   %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
#   %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_1, [%bitwise_not], %full_default_2), kwargs = {})
triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = -0.25
    tmp2 = tmp0 < tmp1
    tmp3 = -1.0
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tmp5 = 0.25
    tmp6 = tmp0 >= tmp5
    tmp7 = 1.0
    tmp8 = tl.where(tmp6, tmp7, tmp4)
    tmp9 = tmp2 | tmp6
    tmp10 = tmp9 == 0
    tmp11 = 0.0
    tmp12 = tl.where(tmp10, tmp11, tmp8)
    tl.store(in_out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0; del buf0  # reuse
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [setitem, setitem_1, setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
        stream0 = get_raw_stream(0)
        triton_poi_fused_index_put_lift_fresh_0.run(buf2, arg0_1, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Ternary(nn.Module):
    """
    Ternarize the input activations to -1, 0, 1.
    """

    def __init__(self, left=-0.25, right=0.25):
        super().__init__()
        self.left = left
        self.right = right

    def forward(self, input):
        input = input.clone()
        left_index = input.lt(self.left)
        right_index = input.ge(self.right)
        input[left_index] = -1
        input[right_index] = 1
        input[~(left_index | right_index)] = 0
        return input

    def backward(self, grad_output):
        grad_input = grad_output.clone()
        return grad_input


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.25
    tmp2 = tmp0 < tmp1
    tmp3 = -1.0
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tmp5 = 0.25
    tmp6 = tmp0 >= tmp5
    tmp7 = 1.0
    tmp8 = tl.where(tmp6, tmp7, tmp4)
    tmp9 = tmp2 | tmp6
    tmp10 = tmp9 == 0
    tmp11 = 0.0
    tmp12 = tl.where(tmp10, tmp11, tmp8)
    tl.store(in_out_ptr0 + x0, tmp12, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_index_put_lift_fresh_0[grid(256)](buf2, arg0_1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf2,


class TernaryNew(nn.Module):
    """
    Ternarize the input activations to -1, 0, 1.
    """

    def __init__(self, left=-0.25, right=0.25):
        super().__init__()
        self.left = left
        self.right = right

    def backward(self, grad_output):
        grad_input = grad_output.clone()
        return grad_input

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
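Since Ternary has no parameters, checking the fused index_put kernel against the eager module is a one-liner per input. This is an illustrative snippet, not part of the dataset row, and assumes a CUDA device; for uniform inputs in [0, 1) the result contains only 0s and 1s:

# Hypothetical sanity check: thresholds at -0.25 / 0.25 map values to -1, 0, 1.
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.equal(Ternary()(x), TernaryNew()(x))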
C-SUNSHINE/TOQ-Nets-PyTorch-Release
Ternary
false
17,137
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
NonLocalLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/x2/cx2syrw434limpmb7dprdziicl7jpidvl5cm3mr4jxs6xcl76p2t.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/mc/cmc5vjxc52m7mc6hsfq3tjijy5a3r4tagbkupogg7lnqnaq7pag6.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   y => div_1, exp, sum_1
# Graph fragment:
#   %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
#   %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
#   %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
#   %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
#   %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[64, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, float("-inf"))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tmp15 = tmp10 / tmp14
    tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/if/cifvikojqzcsedkolc3gitqniokpehyf3lojvwfd3yreamlhpn4x.py
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.div]
# Source node to ATen node mapping:
#   z => div_2
# Graph fragment:
#   %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_1, 4.0), kwargs = {})
triton_poi_fused_div_2 = async_compile.triton('triton_poi_fused_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/h3/ch3dkn75z5nv2s6poei22lobtkafusftzt2ks6goill4cq3nfbmj.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %permute_1), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4, ), (1, ))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_9
        buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 1, 4), 0), out=buf5)
        buf8 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [y], Original ATen: [aten._softmax]
        triton_per_fused__softmax_1.run(buf5, buf8, 64, 16, grid=grid(64), stream=stream0)
        del buf5
        buf9 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf8, reinterpret_tensor(buf4, (4, 16, 4), (64, 4, 1), 0), out=buf9)
        buf10 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [z], Original ATen: [aten.div]
        triton_poi_fused_div_2.run(buf10, 256, grid=grid(256), stream=stream0)
        buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11)
        buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf11  # reuse
        # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
        triton_poi_fused_add_3.run(buf12, primals_11, primals_1, 256, grid=grid(256), stream=stream0)
        del primals_11
    return (buf12, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf4, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf3, (4, 16, 4), (64, 4, 1), 0), primals_8, primals_6, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class NonLocalLayer(nn.Module):

    def __init__(self, input_dim, output_dim, hidden_dim=None,
            t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1,
            bias=True, residual=True):
        super().__init__()
        if t_padding is None:
            t_padding = (t_kernel_size - 1) // 2
        self.input_dim = input_dim
        self.output_dim = output_dim
        hidden_dim = hidden_dim if hidden_dim is not None else (input_dim + output_dim) // 2
        self.hidden_dim = hidden_dim
        self.theta = nn.Linear(hidden_dim, hidden_dim)
        self.phi = nn.Linear(hidden_dim, hidden_dim)
        self.g = nn.Linear(hidden_dim, hidden_dim)
        self.f = nn.Linear(hidden_dim, output_dim)
        self.conv = nn.Conv2d(input_dim, hidden_dim,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
        if not residual:
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def forward(self, x):
        """
        :param x: [batch, length, n_agents, input_dim]
        return output[batch, length, n_agents, output_dim]
        """
        batch, length, n_agents, _ = x.size()
        x = x.permute(0, 3, 1, 2)
        res = self.residual(x)
        if not isinstance(res, int):
            res = res.permute(0, 2, 3, 1)
        x = self.conv(x).permute(0, 2, 3, 1)
        length = x.size(1)
        x = x.reshape(batch, length * n_agents, self.hidden_dim)
        theta = self.theta(x.reshape(-1, self.hidden_dim)).reshape(batch, length * n_agents, -1)
        phi = self.phi(x.reshape(-1, self.hidden_dim)).reshape(batch, length * n_agents, -1).permute(0, 2, 1)
        g = self.g(x.reshape(-1, self.hidden_dim)).reshape(batch, length * n_agents, -1)
        y = (torch.bmm(theta, phi) / theta.size(-1) ** 0.5).softmax(dim=2)
        z = torch.bmm(y, g) / y.size(-1) ** 0.5
        z = self.f(z.view(batch, length * n_agents, -1)).reshape(batch, length, n_agents, self.output_dim)
        return z + res


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tmp15 = tmp10 / tmp14
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)


@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1,
            (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_9
        buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0),
            reinterpret_tensor(buf3, (4, 4, 16), (64, 1, 4), 0), out=buf5)
        buf8 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(64)](buf5, buf8, 64, 16, XBLOCK=32,
            num_warps=4, num_stages=1)
        del buf5
        buf9 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(buf8, reinterpret_tensor(buf4, (4, 16, 4),
            (64, 4, 1), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_div_2[grid(256)](buf10, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11)
        buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf11
        triton_poi_fused_add_3[grid(256)](buf12, primals_11, primals_1,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf12, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4),
        (64, 1, 16, 4), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
        buf8, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), primals_10,
        reinterpret_tensor(buf4, (4, 4, 16), (64, 1, 4), 0),
        reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0),
        reinterpret_tensor(buf3, (4, 16, 4), (64, 4, 1), 0), primals_8,
        primals_6, primals_4)


class NonLocalLayerNew(nn.Module):

    def __init__(self, input_dim, output_dim, hidden_dim=None,
            t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1,
            bias=True, residual=True):
        super().__init__()
        if t_padding is None:
            t_padding = (t_kernel_size - 1) // 2
        self.input_dim = input_dim
        self.output_dim = output_dim
        hidden_dim = hidden_dim if hidden_dim is not None else (input_dim + output_dim) // 2
        self.hidden_dim = hidden_dim
        self.theta = nn.Linear(hidden_dim, hidden_dim)
        self.phi = nn.Linear(hidden_dim, hidden_dim)
        self.g = nn.Linear(hidden_dim, hidden_dim)
        self.f = nn.Linear(hidden_dim, output_dim)
        self.conv = nn.Conv2d(input_dim, hidden_dim,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
        if not residual:
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def forward(self, input_0):
        primals_4 = self.theta.weight
        primals_3 = self.theta.bias
        primals_6 = self.phi.weight
        primals_5 = self.phi.bias
        primals_8 = self.g.weight
        primals_7 = self.g.bias
        primals_10 = self.f.weight
        primals_9 = self.f.bias
        primals_2 = self.conv.weight
        primals_11 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
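Because the generated forward gathers the weights and biases into a positional primals list, a useful check is to copy one state dict into both modules and compare outputs; if the primals mapping is faithful to the traced graph, the difference should be at floating-point noise level. The snippet below is illustrative only (not part of the dataset row) and assumes a CUDA device:

# Hypothetical sanity check: report the deviation between eager and fused outputs.
eager = NonLocalLayer(input_dim=4, output_dim=4).cuda()
fused = NonLocalLayerNew(input_dim=4, output_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print('max abs diff:', (eager(x) - fused(x)).abs().max().item())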
C-SUNSHINE/TOQ-Nets-PyTorch-Release
NonLocalLayer
false
17,138
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
Abs
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/pt/cptvxetwofnscggxrgs4w2kj53s7pn2lu7ttuxjrbyumsyeey2xw.py
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
# Source node to ATen node mapping:
#   abs_1 => abs_1
# Graph fragment:
#   %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_abs_0 = async_compile.triton('triton_poi_fused_abs_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl_math.abs(tmp0)
    tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
        stream0 = get_raw_stream(0)
        triton_poi_fused_abs_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data


class Abs(torch.nn.Module):

    def __init__(self):
        super(Abs, self).__init__()

    def forward(self, input):
        return torch.abs(input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.abs(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class AbsNew(torch.nn.Module):

    def __init__(self):
        super(AbsNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
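For a pure elementwise op like abs, the fused kernel should reproduce eager results bit for bit. An illustrative check, not part of the dataset row, assuming a CUDA device:

# Hypothetical sanity check: |x| from the Triton kernel vs. torch.abs.
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.equal(AbsNew()(x), torch.abs(x))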
CoraJung/flexible-input-slu
Abs
false
17,139
[ "Apache-2.0" ]
7
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
AvgPoolPad
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/zo/czog74ucn2lnbdjfwh5klimpf64acwsdhtchikb3mnxg4dkciyec.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d]
# Source node to ATen node mapping:
#   x => constant_pad_nd
#   x_1 => avg_pool2d
# Graph fragment:
#   %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {})
#   %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {})
triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 3) % 3
    x0 = xindex % 3
    x2 = (xindex // 9)
    x4 = xindex
    tmp0 = (-1) + (2*x1)
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = (-1) + (2*x0)
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = (-2) + (2*x1)
    tmp12 = tmp11 >= tmp1
    tmp13 = (-2) + (2*x0)
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    tmp20 = 2*x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = tmp29 + tmp19
    tmp31 = 1 + (2*x0)
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp40 + tmp30
    tmp42 = 2*x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = tmp51 + tmp41
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp58 + tmp52
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = tmp65 + tmp59
    tmp67 = 1 + (2*x1)
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = tmp76 + tmp66
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = tmp83 + tmp77
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = tmp90 + tmp84
    tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5))))
    tmp93 = tmp91 / tmp92
    tl.store(out_ptr0 + (x4), tmp93, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0)
        del arg0_1
    return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn


class AvgPoolPad(nn.Module):

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, x):
        x = self.pad(x)
        x = self.pool(x)
        x = x[:, :, 1:, 1:]
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = -2 + 2 * x1
    tmp12 = tmp11 >= tmp1
    tmp13 = -2 + 2 * x0
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    tmp20 = 2 * x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = tmp29 + tmp19
    tmp31 = 1 + 2 * x0
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp40 + tmp30
    tmp42 = 2 * x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = tmp51 + tmp41
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp58 + tmp52
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = tmp65 + tmp59
    tmp67 = 1 + 2 * x1
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = tmp76 + tmp66
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = tmp83 + tmp77
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = tmp90 + tmp84
    tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
        0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
        (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <=
        2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 +
        2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 *
        x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
        x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) +
        (2 + 2 * x0) * (2 + 2 * x0 < 5))
    tmp93 = tmp91 / tmp92
    tl.store(out_ptr0 + x4, tmp93, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
            buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),


class AvgPoolPadNew(nn.Module):

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPadNew, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
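Note that call returns a strided view into the 3x3 pooling buffer (the element offset of 4 selects the cropped 2x2 window), so the fused output is non-contiguous; torch.allclose still handles that. An illustrative check against the eager pad/pool/crop pipeline, not part of the dataset row, assuming a CUDA device:

# Hypothetical sanity check: fused pad + avg-pool + crop vs. the eager module.
eager = AvgPoolPad().cuda()
fused = AvgPoolPadNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)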
CalebEverett/fastai-dl2
AvgPoolPad
false
17,140
[ "Apache-2.0" ]
4
64d23592eddca6ca1f3647e73c319e97c8eb392b
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
GlobalAvgPool2d
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/yg/cygooswl5gkxugqq2ejgag2vtcqhtumn2j3notsgzty3xoxbrq4v.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
#   adaptive_avg_pool2d => mean
# Graph fragment:
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (reinterpret_tensor(buf1, (4, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class GlobalAvgPool2d(nn.Module): def __init__(self): """Global average pooling over the input's spatial dimensions""" super(GlobalAvgPool2d, self).__init__() def forward(self, inputs): return nn.functional.adaptive_avg_pool2d(inputs, 1).view(inputs. size(0), -1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
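A minimal usage sketch, added for illustration and assuming the GlobalAvgPool2d class above is in scope: it runs the module on the get_inputs() shape and checks the result against a direct mean over the spatial dimensions.

import torch

pool = GlobalAvgPool2d()
x = torch.rand(4, 4, 4, 4)                 # (batch, channels, H, W), as in get_inputs()
out = pool(x)                              # spatial dims pooled away, then flattened
assert out.shape == (4, 4)
assert torch.allclose(out, x.mean(dim=(-1, -2)))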
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0), class GlobalAvgPool2dNew(nn.Module): def __init__(self): """Global average pooling over the input's spatial dimensions""" super(GlobalAvgPool2dNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
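A hedged parity sketch, not part of the record: assuming a CUDA device (call() allocates CUDA buffers) and that GlobalAvgPool2d from the eager listing is importable alongside GlobalAvgPool2dNew, the two forward paths should agree.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = GlobalAvgPool2d()(x)
    fused = GlobalAvgPool2dNew()(x)
    assert eager.shape == fused.shape == (4, 4)
    assert torch.allclose(eager, fused, atol=1e-6)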
BigFishMaster/tnt
GlobalAvgPool2d
false
17141
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hf/chfq34gskpaqs37rpzhew5n4jxv7w673tzb3b5wfw5s7aki7upzw.py # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': 
set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex x3 = (xindex // 64) x5 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4), xmask) tmp2 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x4), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn class LayerNorm(torch.nn.Module): def __init__(self, dim, eps=1e-06): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(dim)) self.beta = nn.Parameter(torch.zeros(dim)) self.eps = eps def forward(self, x): mean = x.mean(1, keepdim=True) std = x.std(1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
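An illustrative check, added here and assuming the LayerNorm class above is in scope: unlike torch.nn.LayerNorm, this variant normalizes over dim 1 with an unbiased std, so we verify against that formula directly.

import torch

ln = LayerNorm(dim=4)
x = torch.rand(4, 4, 4, 4)
mean = x.mean(1, keepdim=True)
std = x.std(1, keepdim=True)               # unbiased (correction=1), matching forward()
ref = ln.gamma * (x - mean) / (std + ln.eps) + ln.beta
assert torch.allclose(ln(x), ref)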
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex x3 = xindex // 64 x5 = xindex % 16 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask) tmp2 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(torch.nn.Module): def __init__(self, dim, eps=1e-06): super(LayerNormNew, self).__init__() self.gamma = nn.Parameter(torch.ones(dim)) self.beta = nn.Parameter(torch.zeros(dim)) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
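A hedged parity sketch, added for context: assuming CUDA and that both LayerNorm listings share a namespace (the fused kernel hard-codes the 4x4x4x4 layout asserted in call()), eager and Triton outputs should match.

import torch

if torch.cuda.is_available():
    eager, fused = LayerNorm(4).cuda(), LayerNormNew(4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)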
CoraJung/flexible-input-slu
LayerNorm
false
17142
[ "Apache-2.0" ]
7
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
WithBall
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/qf/cqfd52hb56emhqftygc66fzewiqvyilmvkw7o7b3fxrywvi5gi2b.py # Topologically Sorted Source Nodes: [sub_1, exp, truediv, small], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] # Source node to ATen node mapping: # exp => exp # small => sigmoid # sub_1 => sub_1 # truediv => div # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %view_5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%primals_3,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %exp), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, %exp), kwargs = {}) triton_poi_fused_div_exp_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_exp_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 4 x2 = (xindex // 12) x3 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (4 + (4*x0) + (16*x1) + (64*x2) + (128*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + ((16*x1) + (64*x2) + (64*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (5 + (4*x0) + (16*x1) + (64*x2) + (128*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (16*x1) + (64*x2) + (64*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (6 + (4*x0) + (16*x1) + (64*x2) + (128*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (16*x1) + (64*x2) + (64*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (7 + (4*x0) + (16*x1) + (64*x2) + (128*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (16*x1) + (64*x2) + (64*((x0 + (3*x1)) // 12))), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (0)) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp5 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp10 + tmp14 tmp18 = tmp16 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp1 - tmp21 tmp25 = tl_math.exp(tmp24) tmp26 = tmp22 / tmp25 tmp27 = tl.sigmoid(tmp26) tmp28 = tmp26 / tmp25 tl.store(out_ptr1 + (x3), tmp27, xmask) tl.store(out_ptr2 + (x3), tmp28, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, exp, truediv, small], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0.run(primals_2, primals_1, primals_3, buf1, buf2, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 return (buf1, primals_3, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import math import torch from torch import nn def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class MinPoolTrinary(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ assert states.size(1) >= 3 side_length = (states.size(1) + 1) // 3 return torch.cat([torch.min(states[:, :side_length], dim=1, keepdim =True)[0], torch.min(states[:, side_length:-side_length], dim=1, keepdim=True)[0], torch.min(states[:, -side_length:], dim=1, keepdim=True)[0]], dim=1) def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,)) class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Position(nn.Module): def __init__(self, position_extractor=lambda x: x): super().__init__() self.position_extractor = position_extractor def forward(self, states): """ :param states: [batch, length, n_agents, state_dim] """ return apply_last_dim(self.position_extractor, states) def show(self, name='Position', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = x's first three dims" % name) class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftSmall(SoftCompare): """ Sigmoid((alpha - x) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def forward(self, x, beta=None): alpha = self.alpha if beta is None: beta = self.beta return self.sigmoid((alpha - x) / torch.exp(beta)) def show(self, name='SoftSmall', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name, alpha, math.exp(beta))) class WithBall(nn.Module): def __init__(self, alpha=None, 
beta=None, trinary=False): super().__init__() self.position = Position() self.trinary = trinary self.distance = Distance() if trinary: self.min_pool_trinary = MinPoolTrinary() self.small = SoftSmall(alpha, beta) def new_length(self, length): return 3 if self.trinary else length def get_descriptions(self, n_agents, length): n_players = n_agents // 2 agent_name = ['ball'] + [('A' + str(i)) for i in range(1, n_players + 1)] + [('B' + str(i)) for i in range(1, n_players + 1)] res = [] if self.trinary: trinary_name = ['pre', 'act', 'eff'] for i in range(3): for p in range(1, n_agents): res.append('WithBall(%s, %s)' % (agent_name[p], trinary_name[i])) else: new_length = self.new_length(length) for i in range(0, new_length): for p in range(1, n_agents): res.append('WithBall(%s, %.2f)' % (agent_name[p], (i + 0.5) / new_length)) return res def forward(self, states, beta=None): """ :param states: [batch, length, n_agents, state_dim] return [batch, length, n_agents - 1, 1] """ _batch, _length, _n_agents, _state_dim = states.size() ball_pos = self.position(states[:, :, :1]) player_pos = self.position(states[:, :, 1:]) dists = self.distance(player_pos, ball_pos) if self.trinary: dists = self.min_pool_trinary(dists) small = self.small(dists, beta=beta) return small def show(self, name='WithBall', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(p) = small(distance(p, ball))' % name) self.distance.show('distance', indent + 2, **kwargs) self.small.show('small', indent + 2, log=log, **kwargs) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
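An illustrative forward pass, added for context and assuming the WithBall class above is in scope: agent 0 is the ball, so the output scores the remaining n_agents - 1 players.

import torch

wb = WithBall()
states = torch.rand(4, 4, 4, 4)            # [batch, length, n_agents, state_dim]
scores = wb(states)
assert scores.shape == (4, 4, 3, 1)        # [batch, length, n_agents - 1, 1]
assert ((scores > 0) & (scores < 1)).all() # sigmoid keeps scores in (0, 1)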
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 4 x2 = xindex // 12 x3 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (4 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) // 12 )), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (5 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (6 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (7 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) // 12)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp5 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp10 + tmp14 tmp18 = tmp16 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp1 - tmp21 tmp25 = tl_math.exp(tmp24) tmp26 = tmp22 / tmp25 tmp27 = tl.sigmoid(tmp26) tmp28 = tmp26 / tmp25 tl.store(out_ptr1 + x3, tmp27, xmask) tl.store(out_ptr2 + x3, tmp28, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0[grid(48)](primals_2, primals_1, primals_3, buf1, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 return buf1, primals_3, buf1, buf2 def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class MinPoolTrinary(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ assert states.size(1) >= 3 side_length = (states.size(1) + 1) // 3 return torch.cat([torch.min(states[:, 
:side_length], dim=1, keepdim =True)[0], torch.min(states[:, side_length:-side_length], dim=1, keepdim=True)[0], torch.min(states[:, -side_length:], dim=1, keepdim=True)[0]], dim=1) def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,)) class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Position(nn.Module): def __init__(self, position_extractor=lambda x: x): super().__init__() self.position_extractor = position_extractor def forward(self, states): """ :param states: [batch, length, n_agents, state_dim] """ return apply_last_dim(self.position_extractor, states) def show(self, name='Position', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = x's first three dims" % name) class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftSmall(SoftCompare): """ Sigmoid((alpha - x) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def forward(self, x, beta=None): alpha = self.alpha if beta is None: beta = self.beta return self.sigmoid((alpha - x) / torch.exp(beta)) def show(self, name='SoftSmall', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name, alpha, math.exp(beta))) class WithBallNew(nn.Module): def __init__(self, alpha=None, beta=None, trinary=False): super().__init__() self.position = Position() self.trinary = trinary self.distance = Distance() if trinary: self.min_pool_trinary = MinPoolTrinary() self.small = SoftSmall(alpha, beta) def new_length(self, length): return 3 if self.trinary else length def get_descriptions(self, n_agents, length): n_players = n_agents // 2 agent_name = ['ball'] + [('A' + str(i)) for i in range(1, n_players + 1)] + [('B' + str(i)) for i in range(1, n_players + 1)] res = [] if self.trinary: trinary_name = ['pre', 'act', 'eff'] for i in range(3): for p in range(1, n_agents): res.append('WithBall(%s, %s)' % (agent_name[p], trinary_name[i])) else: new_length = 
self.new_length(length) for i in range(0, new_length): for p in range(1, n_agents): res.append('WithBall(%s, %.2f)' % (agent_name[p], (i + 0.5) / new_length)) return res def show(self, name='WithBall', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(p) = small(distance(p, ball))' % name) self.distance.show('distance', indent + 2, **kwargs) self.small.show('small', indent + 2, log=log, **kwargs) def forward(self, input_0): primals_2 = self.small.alpha primals_3 = self.small.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
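A hedged parity sketch, not part of the record: assumes CUDA and that WithBall from the eager listing is importable next to WithBallNew; alpha is randomly initialized, so the state dict is copied before comparing.

import torch

if torch.cuda.is_available():
    eager, fused = WithBall().cuda(), WithBallNew().cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)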
C-SUNSHINE/TOQ-Nets-PyTorch-Release
WithBall
false
17143
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
SoftLarge
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ry/cryhq72qrvuosrlnmxgmznsvclbwa2kpjr5njwbcmh6rauzveapj.py # Topologically Sorted Source Nodes: [sub, exp, truediv, sigmoid], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] # Source node to ATen node mapping: # exp => exp # sigmoid => sigmoid # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %primals_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %exp), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div,), kwargs = {}) triton_poi_fused_div_exp_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_exp_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = tmp3 / tmp6 tmp8 = tl.sigmoid(tmp7) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, exp, truediv, sigmoid], Original ATen: [aten.sub, aten.exp, aten.div, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0.run(primals_3, primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftLarge(SoftCompare): """ Sigmoid((x - alpha) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def forward(self, x, beta=None): alpha = self.alpha if beta is None: beta = self.beta return self.sigmoid((x - alpha) / torch.exp(beta)) def show(self, name='SoftLarge', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((x - %lf) / %lf)' % (name, alpha, math.exp(beta))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
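An illustrative usage, added here and assuming the SoftLarge class above is in scope: fixing alpha and beta makes the module the plain formula Sigmoid((x - alpha) / e^beta).

import torch

sl = SoftLarge(alpha=0.5, beta=0.0)        # fixed, non-trainable thresholds
x = torch.rand(4, 4, 4, 4)
ref = torch.sigmoid((x - 0.5) / torch.exp(torch.tensor(0.0)))
assert torch.allclose(sl(x), ref)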
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = tmp3 / tmp6 tmp8 = tl.sigmoid(tmp7) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_sigmoid_sub_0[grid(256)](primals_3, primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3, buf0 class SoftCompare(nn.Module): def __init__(self, alpha=None, beta=None): super().__init__() self.alpha = nn.Parameter(torch.ones(1) * (0 if alpha is None else alpha), requires_grad=True) self.beta = nn.Parameter(torch.ones(1) * (0 if beta is None else beta), requires_grad=True) if alpha is None: nn.init.normal_(self.alpha.data, 0, 1) else: self.alpha.requires_grad_(False) if beta is not None: self.beta.requires_grad_(False) self.sigmoid = nn.Sigmoid() def forward(self, x): raise NotImplementedError class SoftLargeNew(SoftCompare): """ Sigmoid((x - alpha) / e^beta) """ def __init__(self, alpha=None, beta=None): super().__init__(alpha, beta) def show(self, name='SoftLarge', indent=0, log=print, **kwargs): alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha beta = kwargs['beta'] if 'beta' in kwargs else self.beta log(' ' * indent + '- %s(x) = Sigmoid((x - %lf) / %lf)' % (name, alpha, math.exp(beta))) def forward(self, input_0): primals_1 = self.alpha primals_2 = self.beta primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
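A hedged parity sketch, added for context: assumes CUDA and that both SoftLarge listings share a namespace; call() asserts one-element alpha/beta and a contiguous 4x4x4x4 input.

import torch

if torch.cuda.is_available():
    eager, fused = SoftLarge(0.5, 0.0).cuda(), SoftLargeNew(0.5, 0.0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-6)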
C-SUNSHINE/TOQ-Nets-PyTorch-Release
SoftLarge
false
17144
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
TernaryLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/44/c443e4qjhu2egd7bs5l5244as6qcrvoh4ircgpisnx4w4opzphoh.py # Topologically Sorted Source Nodes: [left_index, right_index, setitem, setitem_1, or_, invert, setitem_2], Original ATen: [aten.lt, aten.ge, aten.lift_fresh, aten.index_put, aten.bitwise_or, aten.bitwise_not] # Source node to ATen node mapping: # invert => bitwise_not # left_index => lt # or_ => bitwise_or # right_index => ge # setitem => full_default, index_put # setitem_1 => full_default_1, index_put_1 # setitem_2 => full_default_2, index_put_2 # Graph fragment: # %lt : [num_users=3] = call_function[target=torch.ops.aten.lt.Scalar](args = (%primals_1, -0.25), kwargs = {}) # %ge : [num_users=3] = call_function[target=torch.ops.aten.ge.Scalar](args = (%primals_1, 0.25), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_1, [%lt], %full_default), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%ge], %full_default_1), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%lt, %ge), kwargs = {}) # %bitwise_not : [num_users=2] = call_function[target=torch.ops.aten.bitwise_not.default](args = (%bitwise_or,), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_1, [%bitwise_not], %full_default_2), kwargs = {}) triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0 = async_compile.triton('triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -0.25 tmp2 = tmp0 < tmp1 tmp3 = -1.0 tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = 0.25 tmp6 = tmp0 >= tmp5 tmp7 = 1.0 tmp8 = tl.where(tmp6, tmp7, tmp4) tmp9 = tmp2 | tmp6 tmp10 = tmp9 == 0 tmp11 = 0.0 tmp12 = tl.where(tmp10, tmp11, tmp8) tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp6, xmask) tl.store(out_ptr2 + (x0), tmp10, xmask) tl.store(in_out_ptr0 + (x0), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [left_index, right_index, setitem, setitem_1, or_, invert, setitem_2], Original ATen: [aten.lt, aten.ge, aten.lift_fresh, aten.index_put, aten.bitwise_or, aten.bitwise_not] stream0 = get_raw_stream(0) triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0.run(buf5, primals_1, buf0, buf1, buf4, 16, grid=grid(16), stream=stream0) del primals_1 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del buf5 del primals_2 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 
4, 1), 0), buf0, buf1, buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F from torch.nn import init class Ternary(nn.Module): """ Ternarize the input activations to -1, 0, 1. """ def __init__(self, left=-0.25, right=0.25): super().__init__() self.left = left self.right = right def forward(self, input): input = input.clone() left_index = input.lt(self.left) right_index = input.ge(self.right) input[left_index] = -1 input[right_index] = 1 input[~(left_index | right_index)] = 0 return input def backward(self, grad_output): grad_input = grad_output.clone() return grad_input class TernaryLinear(nn.Module): def __init__(self, in_features, out_features, ternarize_left=-0.25, ternarize_right=0.25, bias=True): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) self.ternarize = Ternary(ternarize_left, ternarize_right) if bias: self.bias = nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) init.normal_(self.weight.data, 0, 1) def forward(self, input): return F.linear(input, self.ternarize(self.weight), self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
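An illustrative check, added here and assuming the classes above are in scope: the weight that F.linear actually sees is the ternarized copy, so every entry lies in {-1, 0, 1}. Note the constructor leaves bias uninitialized, so it is zeroed here.

import torch

layer = TernaryLinear(in_features=4, out_features=4)
torch.nn.init.zeros_(layer.bias)           # bias starts uninitialized
with torch.no_grad():
    w_t = layer.ternarize(layer.weight)
assert set(w_t.unique().tolist()) <= {-1.0, 0.0, 1.0}
out = layer(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)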
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.25 tmp2 = tmp0 < tmp1 tmp3 = -1.0 tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = 0.25 tmp6 = tmp0 >= tmp5 tmp7 = 1.0 tmp8 = tl.where(tmp6, tmp7, tmp4) tmp9 = tmp2 | tmp6 tmp10 = tmp9 == 0 tmp11 = 0.0 tmp12 = tl.where(tmp10, tmp11, tmp8) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) tl.store(out_ptr2 + x0, tmp10, xmask) tl.store(in_out_ptr0 + x0, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) buf5 = buf3 del buf3 get_raw_stream(0) triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0[ grid(16)](buf5, primals_1, buf0, buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del buf5 del primals_2 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class Ternary(nn.Module): """ Ternarize the input activations to -1, 0, 1. """ def __init__(self, left=-0.25, right=0.25): super().__init__() self.left = left self.right = right def forward(self, input): input = input.clone() left_index = input.lt(self.left) right_index = input.ge(self.right) input[left_index] = -1 input[right_index] = 1 input[~(left_index | right_index)] = 0 return input def backward(self, grad_output): grad_input = grad_output.clone() return grad_input class TernaryLinearNew(nn.Module): def __init__(self, in_features, out_features, ternarize_left=-0.25, ternarize_right=0.25, bias=True): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.Tensor(out_features, in_features)) self.ternarize = Ternary(ternarize_left, ternarize_right) if bias: self.bias = nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) init.normal_(self.weight.data, 0, 1) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
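A hedged parity sketch, not part of the record: assumes CUDA and a shared namespace for both listings; since bias is uninitialized in both constructors, it is zeroed and the state dict copied before comparing.

import torch

if torch.cuda.is_available():
    eager, fused = TernaryLinear(4, 4).cuda(), TernaryLinearNew(4, 4).cuda()
    torch.nn.init.zeros_(eager.bias)
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)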
C-SUNSHINE/TOQ-Nets-PyTorch-Release
TernaryLinear
false
17145
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
FinalPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/be/cbe62bamauzqfphojicwtl7ho6myyvnylv4btfha4nkoa3jpcrke.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data


class FinalPool(torch.nn.Module):

    def __init__(self):
        super(FinalPool, self).__init__()

    def forward(self, input):
        """
        input : Tensor of shape (batch size, T, Cin)

        Outputs a Tensor of shape (batch size, Cin).
        """
        return input.max(dim=1)[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class FinalPoolNew(torch.nn.Module):

    def __init__(self):
        super(FinalPoolNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
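What this entry's compiled kernel computes, as a minimal eager sketch (an illustration only, not part of the upstream repo): the dim=1 max reduction over the four T slices is unrolled into chained pairwise maximums, mirroring the kernel's tmp0..tmp6 sequence.

import torch

x = torch.rand(4, 4, 4, 4)                      # [batch, T, ...] as in get_inputs()
ref = x.max(dim=1)[0]                           # eager FinalPool.forward, shape [4, 4, 4]
# maximum(maximum(maximum(a, b), c), d), exactly like the kernel body
unrolled = torch.maximum(
    torch.maximum(torch.maximum(x[:, 0], x[:, 1]), x[:, 2]),
    x[:, 3],
)
assert torch.equal(ref, unrolled)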
CoraJung/flexible-input-slu
FinalPool
false
17,146
[ "Apache-2.0" ]
7
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
BinaryPrimitivesPredefined_v2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/3v/c3vwmsp6fpmxhgr6qslxgqusfaw4dh2ob5mmjii52p2lhxlxicg7.py # Topologically Sorted Source Nodes: [sub_1, states_expand_1], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # states_expand_1 => div # sub_1 => sub_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %select_1), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 3 x2 = (xindex 
// 9) % 4 x3 = (xindex // 36) x4 = xindex tmp16 = tl.load(in_ptr1 + (0)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr1 + (1)) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp0 = tl.full([1], 0, tl.int64) tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.where(tmp2, tmp0, tmp1) tmp4 = tl.load(in_ptr0 + (tmp3 + (4*((5 + x0) % 4)) + (16*x2) + (16*((5 + x0 + (4*x1)) // 16)) + (64*x3) + (64*((5 + x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (tmp3 + (4*x1) + (4*((5 + x0) // 4)) + (16*x2) + (16*((5 + x0 + (4*x1)) // 16)) + (64*x3) + (64*((5 + x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp1 < tmp1 tmp9 = tl.where(tmp8, tmp0, tmp1) tmp10 = tl.load(in_ptr0 + (tmp9 + (4*((5 + x0) % 4)) + (16*x2) + (16*((5 + x0 + (4*x1)) // 16)) + (64*x3) + (64*((5 + x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (tmp9 + (4*x1) + (4*((5 + x0) // 4)) + (16*x2) + (16*((5 + x0 + (4*x1)) // 16)) + (64*x3) + (64*((5 + x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp7 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp18 = tmp15 - tmp17 tmp21 = tmp18 / tmp20 tl.store(out_ptr0 + (x4), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gb/cgbulz542mfifd7ctufvbyyishj7wis2qlqyg7esepsxww2q5bke.py # Topologically Sorted Source Nodes: [sub_1, states_expand_1, sub_2, truediv_1, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # states_expand_1 => div # sub_1 => sub_1 # sub_2 => sub_2 # truediv_1 => div_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %select_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %view_3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 1.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div_1,), kwargs = {}) triton_poi_fused_div_sigmoid_sub_1 = async_compile.triton('triton_poi_fused_div_sigmoid_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 10) x0 = xindex % 10 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3, 1), (36, 9, 3, 1, 144), torch.float32) # Topologically Sorted Source Nodes: [sub_1, states_expand_1], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(primals_1, primals_2, buf0, 144, grid=grid(144), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 3, 3, 10), (360, 90, 30, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, states_expand_1, sub_2, truediv_1, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] triton_poi_fused_div_sigmoid_sub_1.run(buf0, primals_3, buf1, 1440, grid=grid(1440), stream=stream0) del buf0 del primals_3 return (buf1, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): 
super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... ] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class BinaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.distance = Distance() self.primitive_list = ['dist'] self.ineqs.update({'dist': Inequality(out_dim=cmp_dim, distribution ='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, state_dim] return [batch, length, n_agents, n_agents, out_dim] """ n_agents = states.size(2) p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1) p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1) ineqs_inputs = {'dist': self.distance(p1, p2, dim_index=(0, 1)). squeeze(4)[:, :, 1:, 1:]} output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in self.primitive_list], dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 3 x2 = xindex // 9 % 4 x3 = xindex // 36 x4 = xindex tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr1 + 1) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp0 = tl.full([1], 0, tl.int64) tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.where(tmp2, tmp0, tmp1) tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * ((5 + x0) % 4) + 16 * x2 + 16 * (( 5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 4 * ((5 + x0) // 4) + 16 * x2 + 16 * ((5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp1 < tmp1 tmp9 = tl.where(tmp8, tmp0, tmp1) tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * ((5 + x0) % 4) + 16 * x2 + 16 * ( (5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 4 * ((5 + x0) // 4) + 16 * x2 + 16 * ((5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp7 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp18 = tmp15 - tmp17 tmp21 = tmp18 / tmp20 tl.store(out_ptr0 + x4, tmp21, xmask) @triton.jit def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 x0 = xindex % 10 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3, 1), (36, 9, 3, 1, 144), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(144)](primals_1, primals_2, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 3, 3, 10), (360, 90, 30, 10, 1), torch.float32) triton_poi_fused_div_sigmoid_sub_1[grid(1440)](buf0, primals_3, buf1, 1440, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf1, buf1 def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in 
range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution 
= distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... ] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class BinaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.distance = Distance() self.primitive_list = ['dist'] self.ineqs.update({'dist': Inequality(out_dim=cmp_dim, distribution ='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, input_0): primals_3 = self.ineqs.dist.thresholds primals_2 = self.ineqs.dist.normalize.param primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
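A hedged eager-PyTorch sketch of the pipeline the two fused kernels above implement for this entry: pairwise distance over the first two state coordinates (dim_index=(0, 1)), 'normal' normalization, then SoftCmp with beta=0 so the exp(beta) divisor is 1. The mean/std and zero thresholds below are illustrative stand-ins; the real module normal-initializes its thresholds.

import torch

states = torch.rand(4, 4, 4, 4)          # [batch, length, n_agents, state_dim]
mean, std = 0.0, 1.0                     # stand-in for Normalize.param ('normal')
thresholds = torch.zeros(10)             # stand-in for Inequality.thresholds (out_dim=10)

n = states.size(2)
p1 = states.unsqueeze(2).expand(-1, -1, n, -1, -1)
p2 = states.unsqueeze(3).expand(-1, -1, -1, n, -1)
dist = ((p1 - p2)[..., :2] ** 2).sum(-1).sqrt()      # Length over coords 0 and 1
dist = dist[:, :, 1:, 1:]                            # the [:, :, 1:, 1:] slice in forward()
z = (dist - mean) / std                              # triton_poi_fused_div_sub_0
out = torch.sigmoid(z.unsqueeze(-1) - thresholds)    # triton_poi_fused_div_sigmoid_sub_1
print(out.shape)                                     # torch.Size([4, 4, 3, 3, 10])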
C-SUNSHINE/TOQ-Nets-PyTorch-Release
BinaryPrimitivesPredefined_v2
false
17,147
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
BinaryPrimitivesPredefined
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xp/cxpkkdce37qynf2uogy6kep3y6mbxifrydu5aikvt7jalhkg4aqj.py # Topologically Sorted Source Nodes: [sub_1, states_expand_1], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # states_expand_1 => div # sub_1 => sub_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %select_1), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex 
// 16) % 4 x3 = (xindex // 64) x4 = xindex tmp16 = tl.load(in_ptr1 + (0)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr1 + (1)) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp0 = tl.full([1], 0, tl.int64) tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.where(tmp2, tmp0, tmp1) tmp4 = tl.load(in_ptr0 + (tmp3 + (4*x0) + (16*x2) + (16*((x0 + (4*x1)) // 16)) + (64*x3) + (64*((x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (tmp3 + (4*x1) + (16*x2) + (16*((x0 + (4*x1)) // 16)) + (64*x3) + (64*((x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp1 < tmp1 tmp9 = tl.where(tmp8, tmp0, tmp1) tmp10 = tl.load(in_ptr0 + (tmp9 + (4*x0) + (16*x2) + (16*((x0 + (4*x1)) // 16)) + (64*x3) + (64*((x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (tmp9 + (4*x1) + (16*x2) + (16*((x0 + (4*x1)) // 16)) + (64*x3) + (64*((x0 + (4*x1) + (16*x2)) // 64))), xmask, eviction_policy='evict_last') tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp7 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp18 = tmp15 - tmp17 tmp21 = tmp18 / tmp20 tl.store(out_ptr0 + (x4), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ci/cciebpx2gt3xc6bxuiitfhw6ybwulrykqupgbg32ol2s3xjp43e4.py # Topologically Sorted Source Nodes: [sub_1, states_expand_1, sub_2, truediv_1, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # states_expand_1 => div # sub_1 => sub_1 # sub_2 => sub_2 # truediv_1 => div_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %select_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %view_3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 1.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%div_1,), kwargs = {}) triton_poi_fused_div_sigmoid_sub_1 = async_compile.triton('triton_poi_fused_div_sigmoid_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 10) x0 = xindex % 10 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) # Topologically Sorted Source Nodes: [sub_1, states_expand_1], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4, 10), (640, 160, 40, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, states_expand_1, sub_2, truediv_1, sigmoid], Original ATen: [aten.sub, aten.div, aten.sigmoid] triton_poi_fused_div_sigmoid_sub_1.run(buf0, primals_3, buf1, 2560, grid=grid(2560), stream=stream0) del buf0 del primals_3 return (buf1, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): 
super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... ] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class BinaryPrimitivesPredefined(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.distance = Distance() self.primitive_list = ['dist_xy'] self.ineqs.update({'dist_xy': Inequality(out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, state_dim] return [batch, length, n_agents, n_agents, out_dim] """ n_agents = states.size(2) p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1) p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1) ineqs_inputs = {'dist_xy': self.distance(p1, p2, dim_index=(0, 1)). squeeze(4)} output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in self.primitive_list], dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr1 + 1) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp0 = tl.full([1], 0, tl.int64) tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.where(tmp2, tmp0, tmp1) tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp1 < tmp1 tmp9 = tl.where(tmp8, tmp0, tmp1) tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1 ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1 ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last') tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp7 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp18 = tmp15 - tmp17 tmp21 = tmp18 / tmp20 tl.store(out_ptr0 + x4, tmp21, xmask) @triton.jit def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2560 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 x0 = xindex % 10 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4, 10), (640, 160, 40, 10, 1), torch.float32) triton_poi_fused_div_sigmoid_sub_1[grid(2560)](buf0, primals_3, buf1, 2560, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf1, buf1 def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, 
dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() 
self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... ] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class BinaryPrimitivesPredefinedNew(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.distance = Distance() self.primitive_list = ['dist_xy'] self.ineqs.update({'dist_xy': Inequality(out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, input_0): primals_3 = self.ineqs.dist_xy.thresholds primals_2 = self.ineqs.dist_xy.normalize.param primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
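This entry differs from the BinaryPrimitivesPredefined_v2 entry above only in that the 'dist_xy' input skips the [:, :, 1:, 1:] slice, so all 4x4 agent pairs are kept; accordingly xnumel grows from 144 to 256 and from 1440 to 2560 in the two kernels. A stand-in shape check (zero thresholds as before, illustration only):

import torch

states = torch.rand(4, 4, 4, 4)
n = states.size(2)
p1 = states.unsqueeze(2).expand(-1, -1, n, -1, -1)
p2 = states.unsqueeze(3).expand(-1, -1, -1, n, -1)
dist = ((p1 - p2)[..., :2] ** 2).sum(-1).sqrt()      # full 4x4 pair grid, no slicing
out = torch.sigmoid(dist.unsqueeze(-1) - torch.zeros(10))
print(out.shape)                                     # torch.Size([4, 4, 4, 4, 10])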
C-SUNSHINE/TOQ-Nets-PyTorch-Release
BinaryPrimitivesPredefined
false
17,148
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
Conv3BN
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/gy/cgysvrjthjxgwk32achz7uofxprlmkqhwkmtl7thbtp7kohn4kl6.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => convolution
#   x_1 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
    tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0; del buf0  # reuse
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
        del primals_2
    return (buf1, primals_1, primals_3, buf2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
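The generated module above leaves the convolution itself to an external kernel and fuses everything after it into one pointwise Triton kernel. As a reading aid, here is a minimal eager-mode sketch of what triton_poi_fused_convolution_relu_threshold_backward_0 computes; the helper name is hypothetical and the snippet is not part of the generated file:

import torch
import torch.nn.functional as F

# Hypothetical reference for the fused kernel above: given the bias-free
# convolution output, it fuses the per-channel bias add, the ReLU, and the
# `relu_out <= 0` boolean mask that aten.threshold_backward consumes in the
# backward pass (buf2 in call()).
def fused_bias_relu_mask_reference(conv_out: torch.Tensor, bias: torch.Tensor):
    relu_out = F.relu(conv_out + bias.view(1, -1, 1, 1))  # tmp2..tmp4 in the kernel
    backward_mask = relu_out <= 0.0                        # tmp6, stored as torch.bool
    return relu_out, backward_mask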
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn


def conv3x3(in_, out):
    return nn.Conv2d(in_, out, 3, padding=1)


class Conv3BN(nn.Module):

    def __init__(self, in_: 'int', out: 'int', bn=False):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.bn = nn.BatchNorm2d(out) if bn else None
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        x = self.activation(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_': 4, 'out': 4}]
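A short usage sketch (assumed for illustration, not part of the repository) driving Conv3BN through the get_inputs/get_init_inputs helpers above; passing bn=True would insert the BatchNorm2d between the convolution and the ReLU:

# Hypothetical usage: build the module from get_init_inputs() and run it on
# get_inputs(). With bn left at its default False, this is conv3x3 + ReLU only.
args, kwargs = get_init_inputs()
model = Conv3BN(*args, **kwargs)
x, = get_inputs()
y = model(x)
assert y.shape == (4, 4, 4, 4) and (y >= 0).all()  # ReLU output is non-negative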
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
        in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index for the per-channel bias
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # threshold_backward mask
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Convolution runs as an external kernel, without bias.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        # Fused bias + ReLU + backward mask, in place on buf1.
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](
            buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf2


def conv3x3(in_, out):
    return nn.Conv2d(in_, out, 3, padding=1)


class Conv3BNNew(nn.Module):

    def __init__(self, in_: 'int', out: 'int', bn=False):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.bn = nn.BatchNorm2d(out) if bn else None
        self.activation = nn.ReLU(inplace=True)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
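A minimal parity check one might run between the eager Conv3BN defined earlier and the compiled Conv3BNNew wrapper. This sketch is an assumption, not part of the generated file: it requires a CUDA device (call() pins buffers to cuda:0), and the tolerances are arbitrary:

# Hypothetical equivalence check: same weights, same input, eager vs. compiled.
if torch.cuda.is_available():
    eager = Conv3BN(4, 4).cuda()
    compiled = Conv3BNNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)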
CalebEverett/fastai-dl2
Conv3BN
false
17149
[ "Apache-2.0" ]
4
64d23592eddca6ca1f3647e73c319e97c8eb392b
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
MatrixTree
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/vp/cvpuatto4d65brcast5qwxjbpm7oqa4pzv6ettrz3hj3hvdmoopb.py
# Topologically Sorted Source Nodes: [eye, ne, lap, sum_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye => eq, full_default, full_default_1, iota_1, where
#   lap => full_default_2, where_1
#   ne => ne
#   sum_1 => sum_1
# Graph fragment:
#   %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
#   %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where, 0), kwargs = {})
#   %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne, %full_default_2, %select), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_1, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_0 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (x0), xmask)
    tmp16 = tl.load(in_ptr0 + (4 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (8 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (12 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/e6/ce6wm6ousb32bcuykza77m6mtkc2z5cngagyvrqesohqbv3mai2n.py
# Topologically Sorted Source Nodes: [eye, ne, lap, neg, diag, lap_1, diag_1, exp_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
# Source node to ATen node mapping:
#   diag => eq_1, full_default_3, iota_2, where_2
#   diag_1 => diagonal_copy
#   exp_1 => exp_1
#   eye => eq, full_default, full_default_1, iota_1, where
#   lap => full_default_2, where_1
#   lap_1 => add_1
#   ne => ne
#   neg => neg
# Graph fragment:
#   %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota_1), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
#   %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where, 0), kwargs = {})
#   %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne, %full_default_2, %select), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_1,), kwargs = {})
#   %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_2, %unsqueeze_2), kwargs = {})
#   %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %permute, %full_default_3), kwargs = {})
#   %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %where_2), kwargs = {})
#   %diagonal_copy : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_1,), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy,), kwargs = {})
#   %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_1, %exp_1, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/du/cdu7ybzc3j3r3wh4temrbxtitshfr6exh5habsjqpjnbub2gzazc.py
# Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, sum_2], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye_1 => eq_3, full_default_7, full_default_8, iota_7, where_4
#   lap_2 => full_default_9, where_5
#   ne_1 => ne_1
#   sum_2 => sum_2
# Graph fragment:
#   %iota_7 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_6, %iota_7), kwargs = {})
#   %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_3, %full_default_7, %full_default_8), kwargs = {})
#   %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_4, 0), kwargs = {})
#   %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %full_default_9, %select_20), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_5, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_2 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (16 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (20 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (24 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (28 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/2z/c2zdtqflxv2exb2bqmsudm5ctzjvnpxehknamdqaosucojhwc2gm.py
# Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, neg_1, diag_5, lap_3, diag_6, exp_5], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
# Source node to ATen node mapping:
#   diag_5 => eq_4, full_default_10, iota_8, where_6
#   diag_6 => diagonal_copy_3
#   exp_5 => exp_5
#   eye_1 => eq_3, full_default_7, full_default_8, iota_7, where_4
#   lap_2 => full_default_9, where_5
#   lap_3 => add_3
#   ne_1 => ne_1
#   neg_1 => neg_1
# Graph fragment:
#   %iota_7 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_6, %iota_7), kwargs = {})
#   %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_3, %full_default_7, %full_default_8), kwargs = {})
#   %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_4, 0), kwargs = {})
#   %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %full_default_9, %select_20), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_5,), kwargs = {})
#   %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_4 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_8, %unsqueeze_8), kwargs = {})
#   %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_4, %permute_5, %full_default_10), kwargs = {})
#   %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, %where_6), kwargs = {})
#   %diagonal_copy_3 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_21,), kwargs = {})
#   %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy_3,), kwargs = {})
#   %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_3, %exp_5, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (16 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/uj/cuj4vy6frw232alf4fbgm6krjokwhs7z3gq4vye7xmetrcmifmks.py
# Topologically Sorted Source Nodes: [diag_4, add_2], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_2 => add_2
#   diag_4 => eq_2, full_default_6, iota_4, where_3
# Graph fragment:
#   %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_4, %unsqueeze_5), kwargs = {})
#   %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_2, %permute_4, %full_default_6), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %where_3), kwargs = {})
triton_poi_fused_add_diag_embed_4 = async_compile.triton('triton_poi_fused_add_diag_embed_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp4 = tl.load(in_ptr0 + (x2), xmask)
    tmp6 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (x2), xmask)
    tmp18 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 0, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tmp2 == tmp0
    tmp5 = tl_math.exp(tmp4)
    tmp7 = tmp5 * tmp6
    tmp8 = 0.0
    tmp9 = tl.where(tmp3, tmp8, tmp7)
    tmp10 = x1
    tmp11 = tmp10 == tmp0
    tmp13 = tmp5 * tmp12
    tmp14 = tl.where(tmp11, tmp8, tmp13)
    tmp15 = tmp9 - tmp14
    tmp16 = tl.where(tmp1, tmp15, tmp4)
    tmp17 = tmp2 == tmp10
    tmp19 = tl_math.exp(tmp18)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.where(tmp17, tmp21, tmp8)
    tmp23 = tmp16 + tmp22
    tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/pn/cpns45qzeow55p653fci3bv3aw3di4uxbd5eqhbtsahfi4sjyrcv.py
# Topologically Sorted Source Nodes: [exp_2, mul, setitem_1, exp_3, mul_1, setitem_2, sub, diag_4, add_2], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_2 => add_2
#   diag_4 => eq_2, full_default_6, iota_4, where_3
#   exp_2 => exp_2
#   exp_3 => exp_3
#   mul => mul
#   mul_1 => mul_1
#   setitem_1 => copy_1, full_default_4
#   setitem_2 => copy_2, full_default_5
#   sub => sub
# Graph fragment:
#   %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_5,), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_2, %permute_1), kwargs = {})
#   %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_7, %full_default_4), kwargs = {})
#   %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul, %copy_1, 1, 0), kwargs = {})
#   %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_6,), kwargs = {})
#   %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %permute_2), kwargs = {})
#   %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_9, %full_default_5), kwargs = {})
#   %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_1, %copy_2, 0, 0), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_1, %select_scatter_default_2), kwargs = {})
#   %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%arg0_1, %sub, 0, 0), kwargs = {})
#   %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_4, %unsqueeze_5), kwargs = {})
#   %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_2, %permute_4, %full_default_6), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %where_3), kwargs = {})
#   %select_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %add_2, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tmp4 == tmp1
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 * tmp8
    tmp10 = 0.0
    tmp11 = tl.where(tmp5, tmp10, tmp9)
    tmp12 = x1
    tmp13 = tmp12 == tmp1
    tmp15 = tmp7 * tmp14
    tmp16 = tl.where(tmp13, tmp10, tmp15)
    tmp17 = tmp11 - tmp16
    tmp19 = tl.where(tmp2, tmp17, tmp18)
    tmp20 = tl.where(tmp2, tmp3, tmp19)
    tl.store(out_ptr0 + (x5), tmp20, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/xu/cxuu67kd2ih7hlfxvzxdodliqss5vd2dnlmciv7rtywdzwxj2owo.py
# Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, sum_3], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye_2 => eq_6, full_default_14, full_default_15, iota_13, where_8
#   lap_4 => full_default_16, where_9
#   ne_2 => ne_2
#   sum_3 => sum_3
# Graph fragment:
#   %iota_13 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_6 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_12, %iota_13), kwargs = {})
#   %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_6, %full_default_14, %full_default_15), kwargs = {})
#   %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_8, 0), kwargs = {})
#   %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_2, %full_default_16, %select_41), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_9, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_6 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (36 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (40 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (44 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/uv/cuv5b6xmsdhxebbwbekrugevp7tta3qd3pbwz7r5f25lrlr6kvyp.py
# Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
# Source node to ATen node mapping:
#   diag_10 => eq_7, full_default_17, iota_14, where_10
#   diag_11 => diagonal_copy_6
#   exp_9 => exp_9
#   eye_2 => eq_6, full_default_14, full_default_15, iota_13, where_8
#   lap_4 => full_default_16, where_9
#   lap_5 => add_5
#   ne_2 => ne_2
#   neg_2 => neg_2
# Graph fragment:
#   %iota_13 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_6 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_12, %iota_13), kwargs = {})
#   %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_6, %full_default_14, %full_default_15), kwargs = {})
#   %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_8, 0), kwargs = {})
#   %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_2, %full_default_16, %select_41), kwargs = {})
#   %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_9,), kwargs = {})
#   %iota_14 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_7 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_14, %unsqueeze_14), kwargs = {})
#   %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_10 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_7, %permute_10, %full_default_17), kwargs = {})
#   %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_2, %where_10), kwargs = {})
#   %diagonal_copy_6 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_42,), kwargs = {})
#   %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy_6,), kwargs = {})
#   %select_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_5, %exp_9, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (32 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/zx/czxsczymlsvzz4xfytxkrcne4jghw4l6hqt2r7yfmmnomn5tjbcp.py
# Topologically Sorted Source Nodes: [diag_9, add_4], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_4 => add_4
#   diag_9 => eq_5, full_default_13, iota_10, where_7
# Graph fragment:
#   %iota_10 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_10, %unsqueeze_11), kwargs = {})
#   %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_5, %permute_9, %full_default_13), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_37, %where_7), kwargs = {})
triton_poi_fused_add_diag_embed_8 = async_compile.triton('triton_poi_fused_add_diag_embed_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (x2), xmask)
    tmp17 = tl.load(in_ptr2 + (16 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (16 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 1, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/ql/cqlqqv6rrej7gm4ylduo4gscfvd3erfpuwkmg2d6mwja4zzb745y.py
# Topologically Sorted Source Nodes: [exp_6, mul_3, setitem_6, exp_7, mul_4, setitem_7, sub_1, diag_9, add_4], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_4 => add_4
#   diag_9 => eq_5, full_default_13, iota_10, where_7
#   exp_6 => exp_6
#   exp_7 => exp_7
#   mul_3 => mul_3
#   mul_4 => mul_4
#   setitem_6 => copy_6, full_default_11
#   setitem_7 => copy_7, full_default_12
#   sub_1 => sub_1
# Graph fragment:
#   %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_25,), kwargs = {})
#   %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_6, %permute_6), kwargs = {})
#   %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_27, %full_default_11), kwargs = {})
#   %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_3, %copy_6, 1, 0), kwargs = {})
#   %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_26,), kwargs = {})
#   %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_7, %permute_7), kwargs = {})
#   %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_29, %full_default_12), kwargs = {})
#   %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_4, %copy_7, 0, 0), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_6, %select_scatter_default_7), kwargs = {})
#   %select_scatter_default_8 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %sub_1, 0, 1), kwargs = {})
#   %iota_10 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_10, %unsqueeze_11), kwargs = {})
#   %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_5, %permute_9, %full_default_13), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_37, %where_7), kwargs = {})
#   %select_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_8, %add_4, 0, 1), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + (x5), tmp21, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/zn/cznct6seufqtrnxq7k5wa23k5tsr4a6s4kwoqkt6vdlglpootd3q.py
# Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, sum_4], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye_3 => eq_9, full_default_21, full_default_22, iota_19, where_12
#   lap_6 => full_default_23, where_13
#   ne_3 => ne_3
#   sum_4 => sum_4
# Graph fragment:
#   %iota_19 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_9 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_18, %iota_19), kwargs = {})
#   %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_9, %full_default_21, %full_default_22), kwargs = {})
#   %ne_3 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_12, 0), kwargs = {})
#   %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_3, %full_default_23, %select_62), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_13, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_10 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (52 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (56 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (60 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/cj/ccje3ga6gm7gxrs6lb3vzzspmz3npzhdx4eufnbnn4bza7ikwddb.py
# Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16], filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (48 + (5*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x2), xmask) tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pn/cpn2v3a4lgavycp7pw6hgu5hqzgq46epn65bvsewcsgil723mihm.py # Topologically Sorted Source Nodes: [diag_14, add_6], Original ATen: [aten.diag_embed, aten.add] # Source node to ATen node mapping: # add_6 => add_6 # diag_14 => eq_8, full_default_20, iota_16, where_11 # Graph fragment: # %iota_16 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_8 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_16, %unsqueeze_17), kwargs = {}) # %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_8, %permute_14, %full_default_20), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_58, %where_11), kwargs = {}) triton_poi_fused_add_diag_embed_12 = async_compile.triton('triton_poi_fused_add_diag_embed_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
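# NOTE (annotation added for readability; not emitted by Inductor): in the
# notation of the reference MatrixTree module reproduced later in this
# record, the kernel below computes the 4x4 batch-2 slice
# `term1 - term2 + diag(roots)` of the matrix-tree marginals -- offsets
# 32..47 index batch element 2 of the flattened (4, 4, 4) score tensor --
# and the subsequent kernel (..._13) scatters this slice back into the
# full output buffer via select_scatter along dim 0 at index 2.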
@triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp5 = tl.load(in_ptr0 + (32 + x2), xmask) tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (x2), xmask) tmp17 = tl.load(in_ptr2 + (32 + x2), xmask) tmp20 = tl.load(in_ptr0 + (32 + (5*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 2, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pf/cpfq63tk4m2243aundthr33vn54ek4yy5rpvd4qx3nysiiqhk7an.py # Topologically Sorted Source Nodes: [exp_10, mul_6, setitem_11, exp_11, mul_7, setitem_12, sub_2, diag_14, add_6], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] # Source node to ATen node mapping: # add_6 => add_6 # diag_14 => eq_8, full_default_20, iota_16, where_11 # exp_10 => exp_10 # exp_11 => exp_11 # mul_6 => mul_6 # mul_7 => mul_7 # setitem_11 => copy_11, full_default_18 # setitem_12 => copy_12, full_default_19 # sub_2 => sub_2 # Graph fragment: # %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_46,), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_10, %permute_11), kwargs = {}) # %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_48, %full_default_18), kwargs = {}) # %select_scatter_default_11 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_6, %copy_11, 1, 0), kwargs = 
{}) # %exp_11 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_47,), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_11, %permute_12), kwargs = {}) # %full_default_19 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_50, %full_default_19), kwargs = {}) # %select_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_7, %copy_12, 0, 0), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_11, %select_scatter_default_12), kwargs = {}) # %select_scatter_default_13 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_10, %sub_2, 0, 2), kwargs = {}) # %iota_16 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_8 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_16, %unsqueeze_17), kwargs = {}) # %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_8, %permute_14, %full_default_20), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_58, %where_11), kwargs = {}) # %select_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_13, %add_6, 0, 2), kwargs = {}) triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x0 = xindex % 4 x1 = (xindex // 4) % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + (x5), xmask) tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + (x5), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mn/cmnzq7y6l7bbvbguv4zgtgukuir43qo6xnjghi3eqjocn7zcxcwm.py # Topologically Sorted Source Nodes: [diag_19, add_8], Original ATen: [aten.diag_embed, aten.add] # Source node to ATen node mapping: # add_8 => add_8 # diag_19 => eq_11, full_default_27, iota_22, where_15 # Graph fragment: # %iota_22 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_11 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_22, %unsqueeze_23), kwargs = {}) # %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_15 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_11, %permute_19, %full_default_27), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_79, %where_15), kwargs = {}) triton_poi_fused_add_diag_embed_14 = async_compile.triton('triton_poi_fused_add_diag_embed_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp5 = tl.load(in_ptr0 + (48 + x2), xmask) tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (x2), xmask) tmp17 = tl.load(in_ptr2 + (48 + x2), xmask) tmp20 = tl.load(in_ptr0 + (48 + (5*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 3, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3p/c3p4mgzau45jj52ckoani65yfa6a567t65g72xqsdrdzfv7abi5b.py # Topologically Sorted Source Nodes: [exp_14, mul_9, setitem_16, exp_15, mul_10, setitem_17, sub_3, diag_19, add_8], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] # Source node to ATen node mapping: # add_8 => add_8 # diag_19 => eq_11, full_default_27, iota_22, where_15 # exp_14 => exp_14 # exp_15 => exp_15 # mul_10 => mul_10 # mul_9 => mul_9 # setitem_16 => copy_16, full_default_25 # setitem_17 => copy_17, full_default_26 # sub_3 => sub_3 # Graph fragment: # %exp_14 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_67,), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_14, %permute_16), kwargs = {}) # %full_default_25 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_69, %full_default_25), kwargs = {}) # %select_scatter_default_16 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_9, %copy_16, 1, 0), kwargs = {}) # %exp_15 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_68,), kwargs = {}) # %mul_10 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_15, %permute_17), kwargs = {}) # %full_default_26 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_71, %full_default_26), kwargs = {}) # %select_scatter_default_17 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_10, %copy_17, 0, 0), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%select_scatter_default_16, %select_scatter_default_17), kwargs = {}) # %select_scatter_default_18 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_15, %sub_3, 0, 3), kwargs = {}) # %iota_22 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_11 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_22, %unsqueeze_23), kwargs = {}) # %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_15 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_11, %permute_19, %full_default_27), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_79, %where_15), kwargs = {}) # %select_scatter_default_19 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_18, %add_8, 0, 3), kwargs = {}) triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x0 = xindex % 4 x1 = (xindex // 4) % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + (x5), xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 
0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + (x5), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [eye, ne, lap, sum_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_eye_masked_fill_ne_sum_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [eye, ne, lap, neg, diag, lap_1, diag_1, exp_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1.run(arg0_1, buf0, buf1, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [eye, ne, lap, neg, diag, lap_1, diag_1, exp_1, inv_laplacian], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf2 = torch.ops.aten.linalg_inv_ex.default(buf1) buf3 = buf2[0] del buf2 buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, sum_2], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] triton_poi_fused_eye_masked_fill_ne_sum_2.run(arg0_1, buf5, 4, grid=grid(4), stream=stream0) buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, neg_1, diag_5, lap_3, diag_6, exp_5], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3.run(arg0_1, buf5, buf6, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, neg_1, diag_5, lap_3, diag_6, exp_5, inv_laplacian_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf7 = torch.ops.aten.linalg_inv_ex.default(buf6) buf8 = buf7[0] del buf7 buf10 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [diag_4, add_2], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_4.run(arg0_1, buf3, buf10, 16, grid=grid(16), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp_2, mul, setitem_1, exp_3, mul_1, setitem_2, sub, diag_4, add_2], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5.run(buf10, arg0_1, buf3, buf11, 64, grid=grid(64), stream=stream0) del buf10 buf12 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, sum_3], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] triton_poi_fused_eye_masked_fill_ne_sum_6.run(arg0_1, buf12, 4, grid=grid(4), stream=stream0) buf13 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [eye_2, 
ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7.run(arg0_1, buf12, buf13, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9, inv_laplacian_2], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf14 = torch.ops.aten.linalg_inv_ex.default(buf13) buf15 = buf14[0] del buf14 buf17 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [diag_9, add_4], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_8.run(arg0_1, buf8, buf11, buf17, 16, grid=grid(16), stream=stream0) buf18 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [exp_6, mul_3, setitem_6, exp_7, mul_4, setitem_7, sub_1, diag_9, add_4], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9.run(buf18, buf17, arg0_1, buf8, 64, grid=grid(64), stream=stream0) del buf17 buf19 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, sum_4], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] triton_poi_fused_eye_masked_fill_ne_sum_10.run(arg0_1, buf19, 4, grid=grid(4), stream=stream0) buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11.run(arg0_1, buf19, buf20, 16, grid=grid(16), stream=stream0) del buf19 # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13, inv_laplacian_3], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf21 = torch.ops.aten.linalg_inv_ex.default(buf20) buf22 = buf21[0] del buf21 buf24 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [diag_14, add_6], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_12.run(arg0_1, buf15, buf18, buf24, 16, grid=grid(16), stream=stream0) buf25 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [exp_10, mul_6, setitem_11, exp_11, mul_7, setitem_12, sub_2, diag_14, add_6], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13.run(buf25, buf24, arg0_1, buf15, 64, grid=grid(64), stream=stream0) del buf15 buf26 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [diag_19, add_8], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_14.run(arg0_1, buf22, buf25, buf26, 16, grid=grid(16), stream=stream0) buf27 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [exp_14, mul_9, setitem_16, exp_15, mul_10, setitem_17, sub_3, diag_19, add_8], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15.run(buf27, buf26, arg0_1, buf22, 64, grid=grid(64), stream=stream0) del arg0_1 del buf22 del 
buf26 return (buf27, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
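# --------------------------------------------------------------------------
# Hedged sanity-check sketch (added for this write-up; not part of the
# generated file). It compares the compiled `call` above against an eager
# re-implementation of the same per-batch matrix-tree computation, written
# to mirror the reference MatrixTree.forward reproduced below.
# `eager_marginals` and `check_against_eager` are hypothetical helper names;
# a CUDA device is required because `call` allocates CUDA buffers.

def eager_marginals(scores, eps=1e-05):
    out = scores.clone()
    for b in range(scores.size(0)):
        # Graph Laplacian of exp-scores, with eps for a stable inverse.
        lap = (scores[b].exp() + eps).masked_fill(
            torch.eye(scores.size(1), device=scores.device).ne(0), 0)
        lap = -lap + torch.diag(lap.sum(0))
        lap[0] = scores[b].diag().exp()  # root scores replace row 0
        inv = lap.inverse()
        factor = inv.diag().unsqueeze(1).expand_as(scores[b]).transpose(0, 1)
        term1 = scores[b].exp().mul(factor)
        term2 = scores[b].exp().mul(inv.transpose(0, 1))
        term1[:, 0] = 0
        term2[0] = 0
        out[b] = term1 - term2 + torch.diag(
            scores[b].diag().exp().mul(inv.transpose(0, 1)[0]))
    return out

def check_against_eager():
    x = torch.rand(4, 4, 4, device='cuda')
    compiled, = call([x.clone()])  # call() clears the argument list it is given
    torch.testing.assert_close(compiled, eager_marginals(x),
                               rtol=1e-4, atol=1e-4)
# --------------------------------------------------------------------------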
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed


class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`.
    """

    def __init__(self, eps=1e-05):
        self.eps = eps
        super(MatrixTree, self).__init__()

    def forward(self, input):
        # Edge potentials: exponentiated scores, with eps added for
        # numerical stability of the matrix inverse below.
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            # Zero the diagonal, then form the graph Laplacian
            # L = -A + diag(column sums of A).
            lap = laplacian[b].masked_fill(
                torch.eye(input.size(1), device=input.device).ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # Replace the first row with the exponentiated root scores,
            # as required by the matrix-tree theorem for rooted trees.
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()
            # Arc marginals via derivatives of log det L (cf. Koo et al., 2007).
            factor = inv_laplacian.diag().unsqueeze(1).expand_as(
                input[b]).transpose(0, 1)
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            # Root-attachment marginals go on the diagonal.
            roots_output = input[b].diag().exp().mul(
                inv_laplacian.transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
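# Hedged usage sketch (added for this write-up): the two helpers above are
# the harness convention these generated files follow -- constructor args
# come from get_init_inputs(), forward inputs from get_inputs().
init_args, init_kwargs = get_init_inputs()
model = MatrixTree(*init_args, **init_kwargs)
marginals = model(*get_inputs())
print(marginals.shape)  # torch.Size([4, 4, 4]); one marginal matrix per batch element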
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + x0, xmask) tmp16 = tl.load(in_ptr0 + (4 + x0), xmask) tmp25 = tl.load(in_ptr0 + (8 + x0), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + x2, xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (16 + x0), xmask) tmp16 = tl.load(in_ptr0 + (20 + x0), xmask) tmp25 = tl.load(in_ptr0 + (24 + x0), xmask) tmp34 = tl.load(in_ptr0 + (28 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = 
tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (16 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp4 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + x2, xmask) tmp18 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 0, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tmp2 == tmp0 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 0.0 tmp9 = tl.where(tmp3, tmp8, tmp7) tmp10 = x1 tmp11 = tmp10 == tmp0 tmp13 = tmp5 * tmp12 tmp14 = tl.where(tmp11, tmp8, tmp13) tmp15 = tmp9 - tmp14 tmp16 = tl.where(tmp1, tmp15, tmp4) tmp17 = tmp2 == tmp10 tmp19 = tl_math.exp(tmp18) tmp21 = tmp19 * tmp20 tmp22 = tl.where(tmp17, tmp21, tmp8) tmp23 = tmp16 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tmp4 == tmp1 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 * tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp5, tmp10, tmp9) tmp12 = x1 tmp13 = tmp12 == tmp1 tmp15 = tmp7 * tmp14 tmp16 = tl.where(tmp13, tmp10, tmp15) tmp17 = tmp11 - tmp16 tmp19 = tl.where(tmp2, 
tmp17, tmp18) tmp20 = tl.where(tmp2, tmp3, tmp19) tl.store(out_ptr0 + x5, tmp20, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (32 + x0), xmask) tmp16 = tl.load(in_ptr0 + (36 + x0), xmask) tmp25 = tl.load(in_ptr0 + (40 + x0), xmask) tmp34 = tl.load(in_ptr0 + (44 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (32 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (16 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (16 + x2), xmask) tmp20 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 1, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def 
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (48 + x0), xmask) tmp16 = tl.load(in_ptr0 + (52 + x0), xmask) tmp25 = tl.load(in_ptr0 + (56 + x0), xmask) tmp34 = tl.load(in_ptr0 + (60 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (48 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (32 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (32 + x2), xmask) tmp20 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 2, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (48 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (48 + x2), xmask) tmp20 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 3, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex 
% 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_eye_masked_fill_ne_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1[ grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = torch.ops.aten.linalg_inv_ex.default(buf1) buf3 = buf2[0] del buf2 buf5 = buf0 del buf0 triton_poi_fused_eye_masked_fill_ne_sum_2[grid(4)](arg0_1, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = buf1 del buf1 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3[ grid(16)](arg0_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = torch.ops.aten.linalg_inv_ex.default(buf6) buf8 = buf7[0] del buf7 buf10 = buf6 del buf6 triton_poi_fused_add_diag_embed_4[grid(16)](arg0_1, buf3, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5[grid(64) ](buf10, arg0_1, buf3, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 buf12 = buf5 del buf5 triton_poi_fused_eye_masked_fill_ne_sum_6[grid(4)](arg0_1, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7[ grid(16)](arg0_1, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = torch.ops.aten.linalg_inv_ex.default(buf13) buf15 = buf14[0] del buf14 buf17 = buf13 del buf13 triton_poi_fused_add_diag_embed_8[grid(16)](arg0_1, buf8, buf11, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = buf11 del buf11 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9[grid(64) ](buf18, buf17, arg0_1, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 buf19 = buf12 del buf12 triton_poi_fused_eye_masked_fill_ne_sum_10[grid(4)](arg0_1, buf19, 4, XBLOCK=4, num_warps=1, num_stages=1) buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0) del buf8 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11[ grid(16)](arg0_1, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf19 buf21 = torch.ops.aten.linalg_inv_ex.default(buf20) buf22 = buf21[0] del buf21 buf24 = buf20 del buf20 triton_poi_fused_add_diag_embed_12[grid(16)](arg0_1, buf15, buf18, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = buf18 del buf18 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13[grid(64) ](buf25, 
buf24, arg0_1, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf15 buf26 = buf24 del buf24 triton_poi_fused_add_diag_embed_14[grid(16)](arg0_1, buf22, buf25, buf26, 16, XBLOCK=16, num_warps=1, num_stages=1) buf27 = buf25 del buf25 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15[grid(64) ](buf27, buf26, arg0_1, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf22 del buf26 return buf27, class MatrixTreeNew(nn.Module): """Implementation of the matrix-tree theorem for computing marginals of non-projective dependency parsing. This attention layer is used in the paper "Learning Structured Text Representations" :cite:`DBLP:journals/corr/LiuL17d`. """ def __init__(self, eps=1e-05): self.eps = eps super(MatrixTreeNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ChenRocks/Distill-BERT-Textgen-ONMT
MatrixTree
false
17150
[ "MIT" ]
7
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
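A plain-PyTorch sketch of the marginal computation the fused kernels in the MatrixTree record above appear to implement (the eye/masked_fill/ne/sum steps build the Laplacian, linalg_inv_ex inverts it, and the exp/mul/sub/diag_embed steps form the edge marginals). The function name, batch loop, and variable names below are illustrative, not taken from the repo.

import torch

def matrix_tree_marginals(scores, eps=1e-05):
    # scores: [batch, n, n]; the diagonal holds root-selection scores.
    batch, n, _ = scores.shape
    output = scores.clone()
    eye = torch.eye(n, device=scores.device)
    for b in range(batch):
        # Off-diagonal edge weights, with a small eps for numerical stability.
        lap = (scores[b].exp() + eps).masked_fill(eye.ne(0), 0)
        lap = -lap + torch.diag(lap.sum(0))      # graph Laplacian D - A
        lap[0] = scores[b].diag().exp()          # first row <- root scores
        inv = lap.inverse()
        # Edge marginal(h, m) via cofactors of the root-augmented Laplacian.
        factor = inv.diag().unsqueeze(1).expand(n, n).transpose(0, 1)
        term1 = scores[b].exp() * factor
        term2 = scores[b].exp() * inv.transpose(0, 1)
        term1[:, 0] = 0
        term2[0] = 0
        output[b] = term1 - term2
        # Root marginals go back onto the diagonal.
        roots = scores[b].diag().exp() * inv.transpose(0, 1)[0]
        output[b] = output[b] + torch.diag(roots)
    return output

# usage sketch: marginals = matrix_tree_marginals(torch.rand(4, 4, 4))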
NullaryPrimitivesPredefined_v2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/g5/cg5itricuk36n25hqnqam7zr6ev2maqtyaq226ioova22and6nsp.py # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] # Source node to ATen node mapping: # padded_states => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %primals_1, %sub_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 6 x0 = xindex % 16 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 
& xmask, eviction_policy='evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tmp18 = tl.full([1], 6, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + (x3), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7k/c7kzgke7qljlvip3pfd6abfk45nesyqx4npxnxmrvhmdd2hmmj7i.py # Topologically Sorted Source Nodes: [padded_states_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # padded_states_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_3, %div, %sub_4], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 6 x0 = xindex % 16 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl.load(in_ptr0 + (48 + x0 + (16*x1) + (96*x2)), 
tmp4 & xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (16 + x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp8 tmp16 = tmp11 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + (32 + x0 + (16*((-1) + x1)) + (96*x2)), tmp22 & xmask, other=0.0) tmp24 = tl.load(in_ptr0 + (x0 + (16*((-1) + x1)) + (96*x2)), tmp22 & xmask, other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tmp25 * tmp8 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tmp0 >= tmp20 tmp30 = tl.full([1], 6, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tl.load(in_ptr0 + (80 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp33 = tl.load(in_ptr0 + (48 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp8 tmp36 = tmp35 * tmp10 tmp37 = tl.load(in_ptr0 + (64 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp38 = tl.load(in_ptr0 + (32 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp39 = tmp37 - tmp38 tmp40 = tmp39 * tmp8 tmp41 = tmp36 - tmp40 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp29, tmp41, tmp42) tmp44 = tl.where(tmp22, tmp28, tmp43) tmp45 = tl.where(tmp4, tmp18, tmp44) tl.store(out_ptr0 + (x3), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/me/cmeltya2f2ryzs3mswxq25t6slzcrpbuojoi6rw7e2m6r2ztgbit.py # Topologically Sorted Source Nodes: [sub_6, states_expand_1], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # states_expand_1 => div_2 # sub_6 => sub_6 # Graph fragment: # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %select_4), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %select_5), kwargs = {}) triton_poi_fused_div_sub_2 = async_compile.triton('triton_poi_fused_div_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, 
in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (32 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (33 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (34 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (35 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + (16*x0) + (96*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (0)) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr1 + (1)) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp27 = tmp24 - tmp26 tmp30 = tmp27 / tmp29 tl.store(in_out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pb/cpbzostqufxvwo6rys7skbtzkljqwk5x6rgajfhy33rtc5f7hgmm.py # Topologically Sorted Source Nodes: [sub_8, sub_9, add, states_expand_3, sub_11, states_expand_5], Original ATen: [aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add # states_expand_3 => div_4 # states_expand_5 => div_6 # sub_11 => sub_11 # sub_8 => sub_8 # sub_9 => sub_9 # Graph fragment: # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select_6), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_7, %select_6), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_9, 1e-05), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, %add), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %select_8), kwargs = {}) # %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_11, %select_9), kwargs = {}) triton_poi_fused_add_div_sub_3 = async_compile.triton('triton_poi_fused_add_div_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_add_div_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + (1)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp10 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (0)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (1)) tmp25 = tl.broadcast_to(tmp24, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp5 - tmp2 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tmp11 = tmp10 * tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp0 * tmp0 tmp16 = tmp14 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = libdevice.sqrt(tmp19) tmp23 = tmp20 - tmp22 tmp26 = tmp23 / tmp25 tl.store(out_ptr0 + (x0), tmp9, xmask) tl.store(out_ptr1 + (x0), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/nh/cnh6uxldh2zbluy5tulbynxy52luyondltsdbzaonqs2nbpoh4t3.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] # Source node to ATen node mapping: # output => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid, %sigmoid_1, %sigmoid_2], -1), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = tl.sigmoid(tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 20, tl.int64) tmp15 = tmp0 < tmp14 tmp16 = tmp13 & tmp15 tmp17 = tl.load(in_ptr2 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr3 + ((-10) + x0), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp8 tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tmp0 >= tmp14 tmp25 = tl.full([1], 30, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tl.load(in_ptr4 + (x1), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.load(in_ptr5 + ((-20) + x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tmp27 - tmp28 tmp30 = tmp29 * tmp8 tmp31 = tl.sigmoid(tmp30) tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype) tmp33 = tl.where(tmp24, tmp31, tmp32) tmp34 = tl.where(tmp16, tmp23, tmp33) tmp35 = tl.where(tmp4, tmp12, tmp34) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (10, ), (1, )) assert_size_stride(primals_4, (2, ), (1, )) assert_size_stride(primals_5, (10, ), (1, )) assert_size_stride(primals_6, (2, ), (1, )) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0) buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_states_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf0, buf1, 384, grid=grid(384), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [sub_6, states_expand_1], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_2.run(buf3, buf1, primals_2, 16, grid=grid(16), stream=stream0) del buf1 del primals_2 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_8, sub_9, add, states_expand_3, 
sub_11, states_expand_5], Original ATen: [aten.sub, aten.add, aten.div] triton_poi_fused_add_div_sub_3.run(primals_1, primals_4, primals_6, buf4, buf5, 16, grid=grid(16), stream=stream0) del primals_1 del primals_4 del primals_6 buf6 = empty_strided_cuda((4, 4, 30), (120, 30, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(buf3, primals_3, buf4, primals_5, buf5, primals_7, buf6, 480, grid=grid(480), stream=stream0) return (buf6, primals_3, primals_5, primals_7, buf3, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class NullaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['ball_acc', 'ball_pos_z', 'ball_speed'] self.ineqs.update({'ball_acc': Inequality(out_dim=cmp_dim, distribution='normal'), 'ball_pos_z': Inequality(out_dim= cmp_dim, distribution='uniform'), 'ball_speed': Inequality( out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, state_dim] return [batch, length, out_dim] """ velocity = self.differential(states) acc = self.differential(velocity) ineqs_inputs = {'ball_acc': torch.norm(acc[:, :, 0, :], p=2, dim=2), 'ball_pos_z': states[:, :, 0, 2], 'ball_speed': torch.norm( states[:, :, 0, :], p=2, dim=2)} output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in self.primitive_list], dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tl.full([1], 6, tl.int64) tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + x3, tmp27, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl.load(in_ptr0 + (48 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (16 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp8 tmp16 = tmp11 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 96 * x2), tmp22 & xmask, other=0.0) tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 96 * x2), tmp22 & xmask, other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tmp25 * tmp8 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tmp0 >= tmp20 tl.full([1], 6, tl.int64) tmp32 = tl.load(in_ptr0 + (80 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp33 = tl.load(in_ptr0 + (48 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp8 tmp36 = tmp35 * tmp10 tmp37 = tl.load(in_ptr0 + (64 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp38 = 
tl.load(in_ptr0 + (32 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp39 = tmp37 - tmp38 tmp40 = tmp39 * tmp8 tmp41 = tmp36 - tmp40 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp29, tmp41, tmp42) tmp44 = tl.where(tmp22, tmp28, tmp43) tmp45 = tl.where(tmp4, tmp18, tmp44) tl.store(out_ptr0 + x3, tmp45, xmask) @triton.jit def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (32 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (16 * x0 + 96 * x1), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (33 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (34 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (35 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 16 * x0 + 96 * x1), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + 0) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr1 + 1) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp27 = tmp24 - tmp26 tmp30 = tmp27 / tmp29 tl.store(in_out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + 1) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp10 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr2 + 0) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + 1) tmp25 = tl.broadcast_to(tmp24, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp5 - tmp2 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tmp11 = tmp10 * tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp15 = tmp0 * tmp0 tmp16 = tmp14 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = libdevice.sqrt(tmp19) tmp23 = tmp20 - tmp22 tmp26 = tmp23 / tmp25 tl.store(out_ptr0 + x0, tmp9, xmask) tl.store(out_ptr1 + x0, tmp26, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = tl.sigmoid(tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 20, tl.int64) tmp15 = tmp0 < tmp14 tmp16 = tmp13 & tmp15 tmp17 = tl.load(in_ptr2 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr3 + (-10 + x0), tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp8 tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tmp0 >= tmp14 tl.full([1], 30, tl.int64) tmp27 = tl.load(in_ptr4 + x1, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tl.load(in_ptr5 + (-20 + x0), tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp29 = tmp27 - tmp28 tmp30 = tmp29 * tmp8 tmp31 = tl.sigmoid(tmp30) tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype) tmp33 = tl.where(tmp24, tmp31, tmp32) tmp34 = tl.where(tmp16, tmp23, tmp33) tmp35 = tl.where(tmp4, tmp12, tmp34) tl.store(out_ptr0 + x2, tmp35, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (2,), (1,)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (2,), (1,)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(384)](buf0, buf1, 384, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0) del buf2 triton_poi_fused_div_sub_2[grid(16)](buf3, buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_2 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_add_div_sub_3[grid(16)](primals_1, primals_4, primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_4 del primals_6 buf6 = empty_strided_cuda((4, 4, 30), (120, 30, 1), torch.float32) triton_poi_fused_cat_4[grid(480)](buf3, primals_3, buf4, primals_5, buf5, primals_7, buf6, 480, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_3, primals_5, primals_7, buf3, buf4, buf5 class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: 
self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class NullaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['ball_acc', 'ball_pos_z', 'ball_speed'] self.ineqs.update({'ball_acc': Inequality(out_dim=cmp_dim, distribution='normal'), 'ball_pos_z': Inequality(out_dim= cmp_dim, distribution='uniform'), 'ball_speed': Inequality( out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, input_0): primals_3 = self.ineqs.ball_acc.thresholds primals_2 = self.ineqs.ball_acc.normalize.param primals_5 = self.ineqs.ball_pos_z.thresholds primals_4 = self.ineqs.ball_pos_z.normalize.param primals_7 = self.ineqs.ball_speed.thresholds primals_6 = self.ineqs.ball_speed.normalize.param primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
NullaryPrimitivesPredefined_v2
false
17151
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
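The two padded_states cat kernels in this record fuse AlignDifferential applied twice (velocity, then acceleration). Below is a self-contained eager check using the module's own formula from the record; only the free-function name align_differential is introduced here.

import torch

def align_differential(states):
    # Length-preserving central difference; endpoints are linearly
    # extrapolated, exactly as in AlignDifferential.forward above.
    padded = torch.cat([states[:, 0:1] * 2 - states[:, 1:2],
                        states,
                        states[:, -1:] * 2 - states[:, -2:-1]], dim=1)
    return (padded[:, 2:] - padded[:, :-2]) / 2

states = torch.rand(4, 4, 4, 4)        # [batch, length, n_agents, state_dim]
velocity = align_differential(states)   # first differential (fused cat_0)
acc = align_differential(velocity)      # second differential (fused cat_1)
assert velocity.shape == states.shape   # length is preserved (new_length)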
MaxPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/7q/c7qplsudd5ht2bnr4sgobrjc77djga6h2rxet736n6tskhpu5ddq.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float("-inf"), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float("-inf"), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float("-inf"), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float("-inf"), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float("-inf"), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float("-inf"), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float("-inf"), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float("-inf"), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + (x4), tmp91, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch.backends.cudnn class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) 
tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CalebEverett/fastai-dl2
MaxPoolPad
false
17152
[ "Apache-2.0" ]
4
64d23592eddca6ca1f3647e73c319e97c8eb392b
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
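A minimal parity sketch for the record above (an added illustration, not part of the dataset): it assumes a CUDA device and that MaxPoolPadNew from this record's triton_code field is in scope, and rebuilds the eager reference from the module's forward (ZeroPad2d, MaxPool2d, then slicing off the padded row/column).

import torch
import torch.nn as nn

def eager_maxpool_pad(x):
    # Eager reference: asymmetric zero pad, 3x3 stride-2 max pool (padding=1),
    # then drop the first row/column that the extra pad introduced.
    x = nn.ZeroPad2d((1, 0, 1, 0))(x)
    x = nn.MaxPool2d(3, stride=2, padding=1)(x)
    return x[:, :, 1:, 1:]

x = torch.rand(4, 4, 4, 4, device='cuda')  # the shape/strides the compiled graph asserts
out = MaxPoolPadNew()(x)                   # returns a strided reinterpret_tensor view
torch.testing.assert_close(out, eager_maxpool_pad(x))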
MaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ez/cez2c66hdhdi53r34wosyuvhkd634k7fob676evmdhjkemg7anii.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + x0 + (4*x1) + (16*x2)), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + ((-2) + x0 + (4*x1) + (16*x2)), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp37 & xmask, other=float("-inf")) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp40 & xmask, other=float("-inf")) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), tmp43 & xmask, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), tmp46 & xmask, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + (4*x1) + (16*x2)), tmp53 & xmask, other=float("-inf")) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), tmp56 & xmask, other=float("-inf")) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), tmp59 & xmask, other=float("-inf")) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), tmp62 & xmask, other=float("-inf")) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + (4*x1) + (16*x2)), tmp69 & xmask, other=float("-inf")) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), tmp72 & xmask, other=float("-inf")) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp68 & tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), tmp75 & xmask, other=float("-inf")) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), tmp78 & xmask, other=float("-inf")) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + (x4), tmp80, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) 
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class MaxPool(nn.Module): def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False): super(MaxPool, self).__init__() self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding) def forward(self, x): if self.zero_pad: x = self.zero_pad(x) x = self.pool(x) if self.zero_pad: x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other =float('-inf')) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask, other=float('-inf')) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask, other=float('-inf')) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = 
tmp68 & tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask, other=float('-inf')) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + x4, tmp80, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class MaxPoolNew(nn.Module): def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False): super(MaxPoolNew, self).__init__() self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BigFishMaster/tnt
MaxPool
false
17153
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
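A hedged usage sketch for the MaxPool record above, assuming a CUDA device and that both the eager MaxPool (python_code field) and the compiled MaxPoolNew (triton_code field) are in scope. The compiled graph is shape-specialized, so it only matches the eager module for the (4, 4, 4, 4) input from get_inputs() and the kernel_size=4 init from get_init_inputs().

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = MaxPool(kernel_size=4)        # stride=1, padding=1, zero_pad=False (defaults)
compiled = MaxPoolNew(kernel_size=4)  # wrapper around the fused Triton kernel
torch.testing.assert_close(compiled(x), eager(x))  # both yield (4, 4, 3, 3)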
UnaryPrimitivesToyotaJoint
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/vd/cvdaesb4lmefwuqovb2akfhmkvtvtbgye3npj7izdjezg65yorqc.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] # Source node to ATen node mapping: # output => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid, %sigmoid_1, %sigmoid_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) x2 = xindex tmp6 = tl.load(in_ptr1 + (0)) tmp7 = 
tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr1 + (1)) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp27 = tl.load(in_ptr3 + (0)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp30 = tl.load(in_ptr3 + (1)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp45 = tl.load(in_ptr5 + (0)) tmp46 = tl.broadcast_to(tmp45, [XBLOCK]) tmp48 = tl.load(in_ptr5 + (1)) tmp49 = tl.broadcast_to(tmp48, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp5 - tmp7 tmp11 = tmp10 - tmp7 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = tmp8 / tmp13 tmp15 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = 1.0 tmp18 = tmp16 * tmp17 tmp19 = tl.sigmoid(tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 20, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp26 = tl.load(in_ptr0 + (1 + (4*x1)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tmp26 - tmp28 tmp32 = tmp31 - tmp28 tmp33 = tmp32 + tmp12 tmp34 = tmp29 / tmp33 tmp35 = tl.load(in_ptr4 + ((-10) + x0), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp17 tmp38 = tl.sigmoid(tmp37) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp25, tmp38, tmp39) tmp41 = tmp0 >= tmp23 tmp42 = tl.full([1], 30, tl.int64) tmp43 = tmp0 < tmp42 tmp44 = tl.load(in_ptr0 + (2 + (4*x1)), tmp41 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp44 - tmp46 tmp50 = tmp49 - tmp46 tmp51 = tmp50 + tmp12 tmp52 = tmp47 / tmp51 tmp53 = tl.load(in_ptr6 + ((-20) + x0), tmp41 & xmask, eviction_policy='evict_last', other=0.0) tmp54 = tmp52 - tmp53 tmp55 = tmp54 * tmp17 tmp56 = tl.sigmoid(tmp55) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp41, tmp56, tmp57) tmp59 = tl.where(tmp25, tmp40, tmp58) tmp60 = tl.where(tmp4, tmp21, tmp59) tl.store(out_ptr0 + (x2), tmp60, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (10, ), (1, )) assert_size_stride(primals_4, (2, ), (1, )) assert_size_stride(primals_5, (10, ), (1, )) assert_size_stride(primals_6, (2, ), (1, )) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf0, 1920, grid=grid(1920), stream=stream0) return (buf0, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class N_aryPrimitivesToyotaJoint(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) class UnaryPrimitivesToyotaJoint(N_aryPrimitivesToyotaJoint): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['pos_x', 'pos_y', 'pos_z'] self.ineqs.update({'pos_x': Inequality(out_dim=cmp_dim, distribution='uniform'), 'pos_y': Inequality(out_dim=cmp_dim, distribution='uniform'), 'pos_z': Inequality(out_dim=cmp_dim, distribution='uniform')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_joints, state_dim] return [batch, length, n_joints, out_dim] """ velocity = self.differential(states) acc = self.differential(velocity) ineqs_inputs = {'pos_x': states[:, :, :, 0], 'pos_y': states[:, :, :, 1], 'pos_z': states[:, :, :, 2], 'speed': torch.norm( velocity, p=2, dim=3), 'acc': torch.norm(acc, p=2, dim=3)} output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, name=k, ** kwargs) for k in self.primitive_list], dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr1 + 1) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp27 = tl.load(in_ptr3 + 0) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp30 = tl.load(in_ptr3 + 1) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp45 = tl.load(in_ptr5 + 0) tmp46 = tl.broadcast_to(tmp45, [XBLOCK]) tmp48 = tl.load(in_ptr5 + 1) tmp49 = tl.broadcast_to(tmp48, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp5 - tmp7 tmp11 = tmp10 - tmp7 tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tmp14 = tmp8 / tmp13 tmp15 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = 1.0 tmp18 = tmp16 * tmp17 tmp19 = tl.sigmoid(tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp4, tmp19, tmp20) tmp22 = tmp0 >= tmp3 tmp23 = tl.full([1], 20, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp26 = tl.load(in_ptr0 + (1 + 4 * x1), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp29 = tmp26 - tmp28 tmp32 = tmp31 - tmp28 tmp33 = tmp32 + tmp12 tmp34 = tmp29 / tmp33 tmp35 = tl.load(in_ptr4 + (-10 + x0), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp17 tmp38 = tl.sigmoid(tmp37) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp25, tmp38, tmp39) tmp41 = tmp0 >= tmp23 tl.full([1], 30, tl.int64) tmp44 = tl.load(in_ptr0 + (2 + 4 * x1), tmp41 & xmask, eviction_policy= 'evict_last', other=0.0) tmp47 = tmp44 - tmp46 tmp50 = tmp49 - tmp46 tmp51 = tmp50 + tmp12 tmp52 = tmp47 / tmp51 tmp53 = tl.load(in_ptr6 + (-20 + x0), tmp41 & xmask, eviction_policy= 'evict_last', other=0.0) tmp54 = tmp52 - tmp53 tmp55 = tmp54 * tmp17 tmp56 = tl.sigmoid(tmp55) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp41, tmp56, tmp57) tmp59 = tl.where(tmp25, tmp40, tmp58) tmp60 = tl.where(tmp4, tmp21, tmp59) tl.store(out_ptr0 + x2, tmp60, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (2,), (1,)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (2,), (1,)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1920)](primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf0, 1920, XBLOCK= 256, num_warps=4, num_stages=1) return (buf0, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class N_aryPrimitivesToyotaJoint(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) class UnaryPrimitivesToyotaJointNew(N_aryPrimitivesToyotaJoint): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['pos_x', 'pos_y', 'pos_z'] self.ineqs.update({'pos_x': Inequality(out_dim=cmp_dim, distribution='uniform'), 'pos_y': Inequality(out_dim=cmp_dim, distribution='uniform'), 'pos_z': Inequality(out_dim=cmp_dim, distribution='uniform')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, input_0): primals_3 = self.ineqs.pos_x.thresholds primals_2 = self.ineqs.pos_x.normalize.param primals_5 = self.ineqs.pos_y.thresholds primals_4 = self.ineqs.pos_y.normalize.param primals_7 = self.ineqs.pos_z.thresholds primals_6 = self.ineqs.pos_z.normalize.param primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
UnaryPrimitivesToyotaJoint
false
17154
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
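A hedged parity sketch for the record above, assuming a CUDA device and this record's classes in scope. The compiled forward hard-codes beta=0 and drops **kwargs, so the eager module is called with its defaults; seeding the global RNG identically before each construction makes the uniformly initialized thresholds agree.

import torch

torch.manual_seed(0)
eager = UnaryPrimitivesToyotaJoint().cuda()        # thresholds drawn via uniform_(0, 1)
torch.manual_seed(0)
compiled = UnaryPrimitivesToyotaJointNew().cuda()  # same RNG draws, same parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))  # both produce (4, 4, 4, 30)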
MaxPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/7q/c7qplsudd5ht2bnr4sgobrjc77djga6h2rxet736n6tskhpu5ddq.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float("-inf"), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float("-inf"), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float("-inf"), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float("-inf"), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float("-inf"), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float("-inf"), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float("-inf"), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float("-inf"), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + (x4), tmp91, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fe/cfekr7y7tna46asxxv4o34n56bzohihkhpuwvq5vn2zaiipxowsg.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (3*x1) + (9*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 
1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = 
tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BigFishMaster/tnt
MaxPoolPad
false
17155
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
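A hedged sketch for the record above (assumes CUDA and this record's definitions in scope): unlike the fastai-dl2 MaxPoolPad compilation earlier in this shard, which returns a strided reinterpret_tensor view, this one materializes x[:, :, 1:, 1:].contiguous() with a second copy kernel (triton_poi_fused_clone_1), so the compiled output is contiguous.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = MaxPoolPadNew()(x)
assert out.is_contiguous() and out.shape == (4, 4, 2, 2)
torch.testing.assert_close(out, MaxPoolPad()(x))  # matches the eager module from python_code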
SpaceTimeRegionalConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hr/chrdug5cz3rgcks5nljq462hayvavrlindo52voz4mx5h2krmzgs.py # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cj/ccjafl4xccjvgp3gawll3peltq2o44dgzksiz6tkl2ras43n32og.py # Topologically Sorted Source Nodes: [g, g_1, g_2], Original ATen: [aten.zeros, aten.maximum] # Source node to ATen node mapping: # g => full_default # g_1 => maximum # g_2 => maximum_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%full_default, %view), kwargs = {}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%maximum, %view_1), kwargs = {}) triton_poi_fused_maximum_zeros_1 = async_compile.triton('triton_poi_fused_maximum_zeros_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_zeros_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_maximum_zeros_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x0 = xindex % 4 x4 = xindex tmp0 = x3 tmp1 = x1 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = x2 tmp8 = x0 tmp9 = tmp7 == tmp8 tmp10 = tl.where(tmp9, tmp3, tmp4) tmp11 = triton_helpers.maximum(tmp6, tmp10) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/v4/cv47rdmcqvreczcawbclwao7hjah5322puwlalivgqjoujygbxb5.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_6,), kwargs = {}) # %le : 
[num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2 + (16*y3)), xmask & ymask) tmp3 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = 0.4082482904638631 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + (x2 + (16*y3)), tmp6, xmask & ymask) tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4)) buf1 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf2 = 
reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [g, g_1, g_2], Original ATen: [aten.zeros, aten.maximum] triton_poi_fused_maximum_zeros_1.run(buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 16), (0, 16, 1), 0), reinterpret_tensor(buf2, (1, 16, 16), (0, 16, 1), 0), out=buf3) del buf1 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 1, 16), 0); del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 1, 16), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_1, buf5, 16, 16, grid=grid(16, 16), stream=stream0) return (buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf5, reinterpret_tensor(buf2, (1, 16, 16), (256, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class SpaceTimeRegionalConv(nn.Module): """ Space Time Region Graph """ def __init__(self, input_dim, output_dim, t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1, bias=True, residual=True): super().__init__() self.input_dim = input_dim self.output_dim = output_dim if t_padding is None: t_padding = (t_kernel_size - 1) // 2 self.conv = nn.Conv2d(input_dim, output_dim, kernel_size=( t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias) if not residual: self.residual = lambda x: 0 elif input_dim == output_dim and t_stride == 1: self.residual = lambda x: x else: self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=1, stride=(t_stride, 1))) self.relu = nn.ReLU() def _get_graph(self, length, n_agents, device): g = torch.zeros(length, n_agents, length, n_agents, device=device) g = torch.max(g, torch.eye(length, length, device=device).view( length, 1, length, 1)) g = torch.max(g, torch.eye(n_agents, n_agents, device=device).view( 1, n_agents, 1, n_agents)) return g.view(length * n_agents, length * n_agents) def forward(self, x): """ :param x: [batch, length, n_agents, input_dim] return output: [batch, new_length, n_agents, output_dim] """ batch, length, n_agents, _ = x.size() x = x.permute(0, 3, 1, 2) res = self.residual(x) x = self.conv(x) new_length = x.size(2) g = self._get_graph(new_length, n_agents, x.device) x = x.view(batch, self.output_dim, new_length * n_agents) x = torch.einsum('ncv,vw->ncw', (x, g)).view(batch, self.output_dim, new_length, n_agents) / (length + n_agents - 2) ** 0.5 x = x + res return self.relu(x.permute(0, 2, 3, 1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_maximum_zeros_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x0 = xindex % 4 x4 = xindex tmp0 = x3 tmp1 = x1 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = x2 tmp8 = x0 tmp9 = tmp7 == tmp8 tmp10 = tl.where(tmp9, tmp3, tmp4) tmp11 = triton_helpers.maximum(tmp6, tmp10) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_out_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp3 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = 0.4082482904638631 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + (x2 + 16 * y3), tmp6, xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4)) buf1 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_maximum_zeros_1[grid(256)](buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 
= empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 16), (0, 16, 1), 0), reinterpret_tensor(buf2, (1, 16, 16), (0, 16, 1), 0), out=buf3) del buf1 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 1, 16), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 1, 16), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(16, 16)](buf4, primals_1, buf5, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf5, reinterpret_tensor(buf2, (1, 16, 16), (256, 1, 16), 0) class SpaceTimeRegionalConvNew(nn.Module): """ Space Time Region Graph """ def __init__(self, input_dim, output_dim, t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1, bias=True, residual=True): super().__init__() self.input_dim = input_dim self.output_dim = output_dim if t_padding is None: t_padding = (t_kernel_size - 1) // 2 self.conv = nn.Conv2d(input_dim, output_dim, kernel_size=( t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias) if not residual: self.residual = lambda x: 0 elif input_dim == output_dim and t_stride == 1: self.residual = lambda x: x else: self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=1, stride=(t_stride, 1))) self.relu = nn.ReLU() def _get_graph(self, length, n_agents, device): g = torch.zeros(length, n_agents, length, n_agents, device=device) g = torch.max(g, torch.eye(length, length, device=device).view( length, 1, length, 1)) g = torch.max(g, torch.eye(n_agents, n_agents, device=device).view( 1, n_agents, 1, n_agents)) return g.view(length * n_agents, length * n_agents) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
SpaceTimeRegionalConv
false
17,156
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
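A note on the SpaceTimeRegionalConv record above: the constant tmp1 = 0.4082482904638631 in triton_poi_fused_relu_threshold_backward_2 is the normalization 1 / sqrt(length + n_agents - 2) = 1 / sqrt(6) specialized to the 4x4 test shapes, and triton_poi_fused_maximum_zeros_1 materializes the graph g purely from index arithmetic (num_load: 0) as the elementwise union of time-identity and agent-identity edges. A minimal standalone sketch checking both facts against the eager module code in the record (nothing beyond that record is assumed):

import math
import torch

length = n_agents = 4
# the scale fused into the relu kernel is 1 / sqrt(length + n_agents - 2)
assert math.isclose(1 / math.sqrt(length + n_agents - 2), 0.4082482904638631)

# g from _get_graph: union (elementwise max) of temporal and agent identity edges
g = torch.zeros(length, n_agents, length, n_agents)
g = torch.max(g, torch.eye(length).view(length, 1, length, 1))
g = torch.max(g, torch.eye(n_agents).view(1, n_agents, 1, n_agents))
g = g.view(length * n_agents, length * n_agents)
# each node connects to the length + n_agents - 1 nodes sharing its time step or agent
assert (g.sum(dim=1) == length + n_agents - 1).all()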
Sigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/dq/cdqehz5oiavdzs4paf3kq6gcc6qh6fgbf75v3ze5mdmp7xrgksq3.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class Sigmoid(nn.Module): def __init__(self, inplace: 'bool'=False): super(Sigmoid, self).__init__() self.inplace = inplace def forward(self, x): return x.sigmoid_() if self.inplace else x.sigmoid() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SigmoidNew(nn.Module): def __init__(self, inplace: 'bool'=False): super(SigmoidNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BigFishMaster/tnt
Sigmoid
false
17,157
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
AvgPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zo/czog74ucn2lnbdjfwh5klimpf64acwsdhtchikb3mnxg4dkciyec.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => avg_pool2d # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * 
((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + (x4), tmp93, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fe/cfekr7y7tna46asxxv4o34n56bzohihkhpuwvq5vn2zaiipxowsg.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (3*x1) + (9*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) del buf0 return (buf1, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class AvgPoolPad(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:].contiguous() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + 
tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * ( 0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + ( -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + x4, tmp93, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class AvgPoolPadNew(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BigFishMaster/tnt
AvgPoolPad
false
17,158
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
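On the AvgPoolPad record above: the long tmp92 expression in triton_poi_fused_avg_pool2d_constant_pad_nd_0 is the divisor for count_include_pad=False, and it factors into a product of clamped window widths: a 3x3 window starting at (-1 + 2*x1, -1 + 2*x0) over the 5x5 zero-padded input contributes (min(2 + 2*x0, 5) - max(-1 + 2*x0, 0)) * (min(2 + 2*x1, 5) - max(-1 + 2*x1, 0)) valid cells. A minimal sketch (assuming only the eager module in the record) that reproduces the divisor logic and the final 2x2 output shape:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
pad = nn.ZeroPad2d((1, 0, 1, 0))
pool = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
ref = pool(pad(x))[:, :, 1:, 1:].contiguous()   # eager AvgPoolPad.forward
assert ref.shape == (4, 4, 2, 2)                # matches buf1 in the compiled call

# divisor per output cell: overlap of each 3x3 window with the 5x5 padded input
for i in range(3):          # x1
    for j in range(3):      # x0
        h0, w0 = 2 * i - 1, 2 * j - 1           # window start (stride 2, padding 1)
        area = (min(h0 + 3, 5) - max(h0, 0)) * (min(w0 + 3, 5) - max(w0, 0))
        assert 4 <= area <= 9   # the pool's own padding is never counted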
Tanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/dz/cdz5q7vomcevzomgmojbjfpaawkdlant2zdo54dyuevzv6uvomfh.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, 
(4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class Tanh(nn.Module): def __init__(self, inplace: 'bool'=False): super(Tanh, self).__init__() self.inplace = inplace def forward(self, x): return x.tanh_() if self.inplace else x.tanh() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class TanhNew(nn.Module): def __init__(self, inplace: 'bool'=False): super(TanhNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BigFishMaster/tnt
Tanh
false
17,159
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
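The Sigmoid and Tanh records above are the minimal instances of the pointwise pattern that every elementwise kernel in this shard follows: flatten the tensor, launch a 1-D grid of ceil(xnumel / XBLOCK) programs, and mask the tail with xindex < xnumel. A short sketch of that launch arithmetic (assumption: the grid helper rounds up, which the generated wrappers rely on):

import math

def num_programs(xnumel: int, XBLOCK: int) -> int:
    # what grid(xnumel) resolves to for a 1-D pointwise kernel
    return math.ceil(xnumel / XBLOCK)

assert num_programs(256, 256) == 1   # sigmoid kernel: one program, no masked tail
assert num_programs(256, 128) == 2   # tanh kernel: two full programs of 128 elements
assert num_programs(144, 128) == 2   # AvgPoolPad kernel: second program mostly masked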
ValueNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xx/cxx7te6xswezhkaicpagyhd72nytaw2elc2vbskcmpij23vw77p7.py # Topologically Sorted Source Nodes: [softplus, tanh, mul, x], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] # Source node to ATen node mapping: # mul => mul # softplus => exp, gt, log1p, where # tanh => tanh # x => clamp_max # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %log1p), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %tanh), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%mul, 6), kwargs = {}) triton_poi_fused_clamp_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_clamp_mul_softplus_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_mul_softplus_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, tanh, mul, x], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_mul_softplus_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_1, tanh_1, mul_1, x_1], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_2, tanh_2, mul_2, x_2], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_0.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: 
[x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_9 return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def mish(x): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function https://arxiv.org/abs/1908.08681v1 implemented for PyTorch / FastAI by lessw2020 https://github.com/lessw2020/mish param: x: output of a layer of a neural network return: mish activation function """ return torch.clamp(x * torch.tanh(F.softplus(x)), max=6) class ValueNetwork(nn.Module): def __init__(self, state_dim, hidden_dim, init_w=0.003): super(ValueNetwork, self).__init__() self.linear1 = nn.Linear(state_dim, hidden_dim) self.linear2 = nn.Linear(hidden_dim, hidden_dim) self.linear2_3 = nn.Linear(hidden_dim, hidden_dim) self.linear3 = nn.Linear(hidden_dim, 1) self.linear1.weight.data.uniform_(-init_w, init_w) self.linear1.bias.data.uniform_(-init_w, init_w) self.linear2.weight.data.uniform_(-init_w, init_w) self.linear2.bias.data.uniform_(-init_w, init_w) self.linear2_3.weight.data.uniform_(-init_w, init_w) self.linear2_3.bias.data.uniform_(-init_w, init_w) self.linear3.weight.data.uniform_(-init_w, init_w) self.linear3.bias.data.uniform_(-init_w, init_w) def forward(self, state): x = mish(self.linear1(state)) x = mish(self.linear2(x)) x = mish(self.linear2_3(x)) x = self.linear3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_9 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0 ), primals_8, primals_6, primals_4 
def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish

    param: x: output of a layer of a neural network
    return: x * tanh(softplus(x)), clamped at a maximum of 6 (a bounded
    variant of mish, analogous to ReLU6)
    """
    return torch.clamp(x * torch.tanh(F.softplus(x)), max=6)


class ValueNetworkNew(nn.Module):

    def __init__(self, state_dim, hidden_dim, init_w=0.003):
        super(ValueNetworkNew, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear2_3 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        self.linear1.weight.data.uniform_(-init_w, init_w)
        self.linear1.bias.data.uniform_(-init_w, init_w)
        self.linear2.weight.data.uniform_(-init_w, init_w)
        self.linear2.bias.data.uniform_(-init_w, init_w)
        self.linear2_3.weight.data.uniform_(-init_w, init_w)
        self.linear2_3.bias.data.uniform_(-init_w, init_w)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear2_3.weight
        primals_7 = self.linear2_3.bias
        primals_8 = self.linear3.weight
        primals_9 = self.linear3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Crawford-fang/ROS_pytorch_RL
ValueNetwork
false
17,160
[ "Apache-2.0" ]
10
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
AdaptiveConcatPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/pq/cpqgsju5p2eyjjmy3imid4jf33v3d47eyjlf6k2xstq6mzcillsk.py # Topologically Sorted Source Nodes: [adaptive_max_pool2d], Original ATen: [aten.adaptive_max_pool2d] # Source node to ATen node mapping: # adaptive_max_pool2d => adaptive_max_pool2d # Graph fragment: # %adaptive_max_pool2d : [num_users=1] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%arg0_1, [1, 1]), kwargs = {}) triton_poi_fused_adaptive_max_pool2d_0 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (16*x2), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x0 + (8*x1)), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mq/cmqjuv33rly4whmxqjz5w75pgp36j44mgyzzrxfythdirwrwqper.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + (x2 + (8*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0) # alias # Topologically Sorted Source Nodes: [adaptive_max_pool2d], Original ATen: [aten.adaptive_max_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4) # alias # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] triton_per_fused_mean_1.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch.backends.cudnn class AdaptiveConcatPool2d(nn.Module): def __init__(self, sz=None): super().__init__() sz = sz or (1, 1) self.ap = nn.AdaptiveAvgPool2d(sz) self.mp = nn.AdaptiveMaxPool2d(sz) def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
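A hedged usage sketch, added for illustration (not part of the original record), assuming the `AdaptiveConcatPool2d` definition above is in scope: it checks that the two halves of the concatenated output match the max and mean reductions that the two Triton kernels in this record write into the two halves of `buf3`.

import torch

m = AdaptiveConcatPool2d()
x = torch.rand(4, 4, 4, 4)
out = m(x)
assert out.shape == (4, 8, 1, 1)  # channels double: [max-pool || avg-pool]
assert torch.allclose(out[:, :4], x.amax(dim=(-2, -1), keepdim=True))
assert torch.allclose(out[:, 4:], x.mean(dim=(-2, -1), keepdim=True))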
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask) @triton.jit def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. 
constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4) triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return buf3, class AdaptiveConcatPool2dNew(nn.Module): def __init__(self, sz=None): super().__init__() sz = sz or (1, 1) self.ap = nn.AdaptiveAvgPool2d(sz) self.mp = nn.AdaptiveMaxPool2d(sz) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CalebEverett/fastai-dl2
AdaptiveConcatPool2d
false
17,161
[ "Apache-2.0" ]
4
64d23592eddca6ca1f3647e73c319e97c8eb392b
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
SpatialCrossMapLRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fp/cfpylbinohoan7yh5fcfp46fvbiwpo63lxt42umo3xnsuu3aeumz.py # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] # Source node to ATen node mapping: # add => add # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class SpatialCrossMapLRN(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True): super(SpatialCrossMapLRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta self.k = k def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(self.k).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(self.k).pow(self.beta) x = x.div(div) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
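An illustrative check (not from the original record), assuming the `SpatialCrossMapLRN` definition above is in scope: with the default arguments the 1x1x1 average pool is the identity, so the forward pass reduces to x / (x^2 + 1)^0.75, the exact expression fused into the Triton kernel in this record.

import torch

m = SpatialCrossMapLRN()  # defaults: local_size=1, alpha=1.0, beta=0.75, k=1
x = torch.rand(4, 4, 4, 4)
expected = x / (x.pow(2) + 1).pow(0.75)
assert torch.allclose(m(x), expected)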
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused elementwise LRN for the default parameters: local_size=1 makes
    # the average pool the identity, so each element computes
    # x / (alpha * x^2 + k) ** beta with alpha=1.0, k=1, beta=0.75.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0  # x^2
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2  # size-1 average pool reduces to multiplying by 1.0
    tmp4 = tmp3 * tmp2  # * alpha (alpha = 1.0)
    tmp5 = tmp4 + tmp2  # + k (k = 1)
    tmp6 = 0.75
    tmp7 = libdevice.pow(tmp5, tmp6)  # (alpha * x^2 + k) ** beta
    tmp8 = tmp0 / tmp7  # normalize the input by the denominator
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SpatialCrossMapLRNNew(nn.Module):

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
        ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRNNew, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if ACROSS_CHANNELS:
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
                padding=int((local_size - 1.0) / 2))
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
BigFishMaster/tnt
SpatialCrossMapLRN
false
17,162
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/as/castm3qnl7zp42ut4jv2nzzsld4sxwkmvhmiehurbqvryiyrvkbn.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 250 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/75/c75pkenzkxelwav7gohaev2jhqh23bvnxlpf5ogjdxqqffikyaar.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (250, 4), (4, 1)) assert_size_stride(primals_3, (250, ), (1, )) assert_size_stride(primals_4, (100, 250), (250, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 250), (250, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 250), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = 
get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1000, grid=grid(1000), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (250, 100), (1, 250), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 400, grid=grid(400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf4, buf3, primals_1, buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((250, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((250, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 250), (250, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F class MLP(nn.Module): def __init__(self, input_dim, output_dim, dropout=0.5): super(MLP, self).__init__() self.input_fc = nn.Linear(input_dim, 250) self.hidden_fc = nn.Linear(250, 100) self.output_fc = nn.Linear(100, output_dim) self.dropout = nn.Dropout(dropout) def forward(self, x): batch_size = x.shape[0] x = x.view(batch_size, -1) x = F.relu(self.input_fc(x)) x = self.dropout(x) x = F.relu(self.hidden_fc(x)) x = self.dropout(x) outputs = self.output_fc(x) return outputs, x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
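A brief usage sketch, added for illustration (not part of the original record), assuming the `MLP` definition above is in scope; `eval()` disables dropout so the shape check is deterministic.

import torch

mlp = MLP(input_dim=4, output_dim=4)
mlp.eval()  # dropout becomes the identity in eval mode
outputs, hidden = mlp(torch.rand(4, 4))
assert outputs.shape == (4, 4)   # output_fc: 100 -> output_dim
assert hidden.shape == (4, 100)  # activations after hidden_fc + ReLU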
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 250 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (250, 4), (4, 1)) assert_size_stride(primals_3, (250,), (1,)) assert_size_stride(primals_4, (100, 250), (250, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 250), (250, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 250), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1000)](buf1, primals_3, 1000, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (250, 100), ( 1, 250), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, buf3, primals_1, buf1, buf3, primals_6, primals_4 class MLPNew(nn.Module): def __init__(self, input_dim, output_dim, dropout=0.5): super(MLPNew, self).__init__() self.input_fc = nn.Linear(input_dim, 250) self.hidden_fc = nn.Linear(250, 100) self.output_fc = nn.Linear(100, output_dim) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.input_fc.weight primals_3 = self.input_fc.bias primals_4 = self.hidden_fc.weight primals_5 = self.hidden_fc.bias primals_6 = self.output_fc.weight primals_7 = self.output_fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
CrispenGari/pneumonia-infection
MLP
false
17,163
[ "MIT" ]
4
8d1fc5f61aa8c4eb06d640e6da5abbbe23ccb85e
https://github.com/CrispenGari/pneumonia-infection/tree/8d1fc5f61aa8c4eb06d640e6da5abbbe23ccb85e
OutConv_Sigmoid
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/gz/cgzv3hdnxfurdwdgyk6w62pjceimmd4fwbpanhcfrvm3kb6nciba.py # Topologically Sorted Source Nodes: [conv2d, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # conv2d => convolution # sigmoid => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class OutConv_Sigmoid(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv_Sigmoid, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): return self.sigmoid(self.conv(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
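A minimal check, added for illustration and assuming the `OutConv_Sigmoid` definition above is in scope: the 1x1 convolution preserves the spatial shape and the sigmoid bounds every output strictly between 0 and 1.

import torch

m = OutConv_Sigmoid(in_channels=4, out_channels=4)
out = m(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)
assert bool((out > 0).all()) and bool((out < 1).all())  # sigmoid range (0, 1)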
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 class OutConv_SigmoidNew(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv_SigmoidNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Curli-quan/oneshot-medical-landmark
OutConv_Sigmoid
false
17,164
[ "Apache-2.0" ]
7
572926077fffbe9832aa16baa98bd046ec326700
https://github.com/Curli-quan/oneshot-medical-landmark/tree/572926077fffbe9832aa16baa98bd046ec326700
QNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py # Topologically Sorted Source Nodes: [x_state_action], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_state_action => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qk/cqk5cnmrffdyh6x4yseboivxypitwp75t6ywqxjlpiff6wcgokiw.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1 => relu # Graph fragment: # %add_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_5, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_5,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (1, ), (1, )) 
assert_size_stride(primals_11, (4, 8), (8, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (1, 4), (4, 1)) assert_size_stride(primals_18, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_state_action], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf5) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x1_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf6, primals_8, 16, grid=grid(16), stream=stream0) del primals_8 buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x1_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf9) del primals_11 buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf10, primals_12, 16, grid=grid(16), stream=stream0) del primals_12 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf12, primals_14, 16, grid=grid(16), stream=stream0) del primals_14 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [x2_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf14, primals_16, 16, grid=grid(16), stream=stream0) del primals_16 buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.addmm] 
extern_kernels.addmm(primals_18, buf14, reinterpret_tensor(primals_17, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_18 return (buf8, buf16, buf0, buf2, buf4, buf6, buf10, buf12, buf14, primals_17, primals_15, primals_13, primals_9, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def weights_init_(m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight, gain=1) torch.nn.init.constant_(m.bias, 0) class QNetwork(nn.Module): def __init__(self, state_dim, action_dim, hidden_dim): super(QNetwork, self).__init__() self.linear1_q1 = nn.Linear(state_dim + action_dim, hidden_dim) self.linear2_q1 = nn.Linear(hidden_dim, hidden_dim) self.linear3_q1 = nn.Linear(hidden_dim, hidden_dim) self.linear4_q1 = nn.Linear(hidden_dim, 1) self.linear1_q2 = nn.Linear(state_dim + action_dim, hidden_dim) self.linear2_q2 = nn.Linear(hidden_dim, hidden_dim) self.linear3_q2 = nn.Linear(hidden_dim, hidden_dim) self.linear4_q2 = nn.Linear(hidden_dim, 1) self.apply(weights_init_) def forward(self, state, action): x_state_action = torch.cat([state, action], 1) x1 = F.relu(self.linear1_q1(x_state_action)) x1 = F.relu(self.linear2_q1(x1)) x1 = F.relu(self.linear3_q1(x1)) x1 = self.linear4_q1(x1) x2 = F.relu(self.linear1_q2(x_state_action)) x2 = F.relu(self.linear2_q2(x2)) x2 = F.relu(self.linear3_q2(x2)) x2 = self.linear4_q2(x2) return x1, x2 def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_dim': 4}]
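A minimal usage sketch for the reference module above (CPU suffices for the eager version; only the compiled variant further down needs CUDA). It assumes QNetwork and get_inputs from this record are in scope:

import torch

net = QNetwork(state_dim=4, action_dim=4, hidden_dim=4)
state, action = get_inputs()
q1, q2 = net(state, action)
print(q1.shape, q2.shape)  # torch.Size([4, 1]) torch.Size([4, 1])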
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 8), (8, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (1, 4), (4, 1)) assert_size_stride(primals_18, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 
1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1,
            4), 0), out=buf5)
        buf6 = buf5
        del buf5
        triton_poi_fused_relu_1[grid(16)](buf6, primals_8, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_8
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8)
        del primals_10
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_11, (8, 4), (1,
            8), 0), out=buf9)
        del primals_11
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_12
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf10, reinterpret_tensor(primals_13, (4, 4), (1,
            4), 0), out=buf11)
        buf12 = buf11
        del buf11
        triton_poi_fused_relu_1[grid(16)](buf12, primals_14, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_14
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf12, reinterpret_tensor(primals_15, (4, 4), (1,
            4), 0), out=buf13)
        buf14 = buf13
        del buf13
        triton_poi_fused_relu_1[grid(16)](buf14, primals_16, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_16
        buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_18, buf14, reinterpret_tensor(
            primals_17, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf16)
        del primals_18
    return (buf8, buf16, buf0, buf2, buf4, buf6, buf10, buf12, buf14,
        primals_17, primals_15, primals_13, primals_9, primals_7, primals_5)


def weights_init_(m):
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)


class QNetworkNew(nn.Module):

    def __init__(self, state_dim, action_dim, hidden_dim):
        super(QNetworkNew, self).__init__()
        self.linear1_q1 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q1 = nn.Linear(hidden_dim, 1)
        self.linear1_q2 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q2 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, input_0, input_1):
        # Map inputs and parameters onto the positional primals that call()
        # asserts: primals_1/primals_2 are the state/action inputs that get
        # concatenated, and the weights/biases follow in layer order.
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.linear1_q1.weight
        primals_4 = self.linear1_q1.bias
        primals_5 = self.linear2_q1.weight
        primals_6 = self.linear2_q1.bias
        primals_7 = self.linear3_q1.weight
        primals_8 = self.linear3_q1.bias
        primals_9 = self.linear4_q1.weight
        primals_10 = self.linear4_q1.bias
        primals_11 = self.linear1_q2.weight
        primals_12 = self.linear1_q2.bias
        primals_13 = self.linear2_q2.weight
        primals_14 = self.linear2_q2.bias
        primals_15 = self.linear3_q2.weight
        primals_16 = self.linear3_q2.bias
        primals_17 = self.linear4_q2.weight
        primals_18 = self.linear4_q2.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18])
        return output[0], output[1]
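A minimal equivalence sketch for this record, assuming a CUDA device and that both the reference QNetwork above and QNetworkNew are in scope (the shared parameter names make state dicts transfer directly):

import torch

ref = QNetwork(state_dim=4, action_dim=4, hidden_dim=4).cuda()
new = QNetworkNew(state_dim=4, action_dim=4, hidden_dim=4).cuda()
new.load_state_dict(ref.state_dict())
state, action = [t.cuda() for t in get_inputs()]
q1_ref, q2_ref = ref(state, action)
q1_new, q2_new = new(state, action)
print(torch.allclose(q1_ref, q1_new, atol=1e-6),
      torch.allclose(q2_ref, q2_new, atol=1e-6))  # expected: True True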
Crawford-fang/ROS_pytorch_RL
QNetwork
false
17,165
[ "Apache-2.0" ]
10
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
LayerNormalization
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fi/cfirwebzg3c53rtcua4vzlldpuixnutokbath2cbd6sgyitlgnch.py # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # mean => mean # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + 
(4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jl/cjlp73exwplrn4y7uqb3o7fby2ie33pg73fcowoc45das77t25rj.py # Topologically Sorted Source Nodes: [pow_1, var, add, std, y, y_1, y_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # std => sqrt # var => mean_1 # y => div # y_1 => mul # y_2 => add_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-10), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_pow_sqrt_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp0 / tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, var, add, std, y, y_1, y_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_pow_sqrt_1.run(buf0, primals_2, primals_3, buf1, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 del primals_3 return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LayerNormalization(nn.Module): def __init__(self, normal_shape, gamma=True, beta=True, epsilon=1e-10): """Layer normalization layer See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf) :param normal_shape: The shape of the input tensor or the last dimension of the input tensor. :param gamma: Add a scale parameter if it is True. :param beta: Add an offset parameter if it is True. :param epsilon: Epsilon for calculating variance. """ super(LayerNormalization, self).__init__() if isinstance(normal_shape, int): normal_shape = normal_shape, else: normal_shape = normal_shape[-1], self.normal_shape = torch.Size(normal_shape) self.epsilon = epsilon if gamma: self.gamma = nn.Parameter(torch.Tensor(*normal_shape)) else: self.register_parameter('gamma', None) if beta: self.beta = nn.Parameter(torch.Tensor(*normal_shape)) else: self.register_parameter('beta', None) self.reset_parameters() def reset_parameters(self): if self.gamma is not None: self.gamma.data.fill_(1) if self.beta is not None: self.beta.data.zero_() def forward(self, x): mean = x.mean(dim=-1, keepdim=True) var = ((x - mean) ** 2).mean(dim=-1, keepdim=True) std = (var + self.epsilon).sqrt() y = (x - mean) / std if self.gamma is not None: y *= self.gamma if self.beta is not None: y += self.beta return y def extra_repr(self): return 'normal_shape={}, gamma={}, beta={}, epsilon={}'.format(self .normal_shape, self.gamma is not None, self.beta is not None, self.epsilon) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'normal_shape': 4}]
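As a sanity check on the math above, the module should agree with torch.nn.functional.layer_norm when gamma and beta are at their initial values (ones and zeros), since both add epsilon to the biased variance before the square root. A sketch using a matching epsilon:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
ln = LayerNormalization(4, epsilon=1e-5)
y = ln(x)
y_ref = F.layer_norm(x, (4,), eps=1e-5)
print(torch.allclose(y, y_ref, atol=1e-6))  # expected: True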
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp0 / tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](buf0, primals_2, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class LayerNormalizationNew(nn.Module): def __init__(self, normal_shape, gamma=True, beta=True, epsilon=1e-10): """Layer normalization layer See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf) :param normal_shape: The shape of the input tensor or the last dimension of the input tensor. :param gamma: Add a scale parameter if it is True. :param beta: Add an offset parameter if it is True. :param epsilon: Epsilon for calculating variance. 
""" super(LayerNormalizationNew, self).__init__() if isinstance(normal_shape, int): normal_shape = normal_shape, else: normal_shape = normal_shape[-1], self.normal_shape = torch.Size(normal_shape) self.epsilon = epsilon if gamma: self.gamma = nn.Parameter(torch.Tensor(*normal_shape)) else: self.register_parameter('gamma', None) if beta: self.beta = nn.Parameter(torch.Tensor(*normal_shape)) else: self.register_parameter('beta', None) self.reset_parameters() def reset_parameters(self): if self.gamma is not None: self.gamma.data.fill_(1) if self.beta is not None: self.beta.data.zero_() def extra_repr(self): return 'normal_shape={}, gamma={}, beta={}, epsilon={}'.format(self .normal_shape, self.gamma is not None, self.beta is not None, self.epsilon) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CyberZHG/torch-layer-normalization
LayerNormalization
false
17,166
[ "MIT" ]
9
89f405b60f53f85da6f03fe685c190ef394ce50c
https://github.com/CyberZHG/torch-layer-normalization/tree/89f405b60f53f85da6f03fe685c190ef394ce50c
DQN_hot2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/k3/ck3ieblgb2bkhcothrorqkt6ebrp6rlmfh7d4mncinldyos5gvuj.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (100, 16), (16, 1)) assert_size_stride(primals_3, (100, ), (1, )) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 400, grid=grid(400), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


class DQN_hot2(nn.Module):
    """
    An MLP for DQN learning.

    Note: Uses a one-hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot2, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, num_actions)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
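A minimal usage sketch for the reference module above (CPU is sufficient here):

import torch

net = DQN_hot2(m=4, n=4, num_actions=4)
board = get_inputs()[0]    # batch of boards, shape (4, 4, 4)
q_values = net(board)      # flattened to (4, 16) inside forward
print(q_values.shape)      # torch.Size([4, 4])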
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (100, 16), (16, 1))
    assert_size_stride(primals_3, (100,), (1,))
    assert_size_stride(primals_4, (4, 100), (100, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1),
            0), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0),
            out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, primals_4


class DQN_hot2New(nn.Module):
    """
    An MLP for DQN learning.

    Note: Uses a one-hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot2New, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, num_actions)

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
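An equivalence sketch against the compiled variant (assumes a CUDA device; parameter names match, so the state dict transfers):

import torch

ref = DQN_hot2(4, 4, 4).cuda()
new = DQN_hot2New(4, 4, 4).cuda()
new.load_state_dict(ref.state_dict())
x = get_inputs()[0].cuda()
print(torch.allclose(ref(x), new(x), atol=1e-6))  # expected: True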
CoAxLab/azad
DQN_hot2
false
17,167
[ "MIT" ]
6
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
PolicyNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xx/cxx7te6xswezhkaicpagyhd72nytaw2elc2vbskcmpij23vw77p7.py # Topologically Sorted Source Nodes: [softplus, tanh, mul, x], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] # Source node to ATen node mapping: # mul => mul # softplus => exp, gt, log1p, where # tanh => tanh # x => clamp_max # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %log1p), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %tanh), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%mul, 6), kwargs = {}) triton_poi_fused_clamp_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_clamp_mul_softplus_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_mul_softplus_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/do/cdozqnt37femkrifsvsmbt4gxljrnvhyti5lxs4c7v7ozpp2ikj2.py # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # log_std_1 => clamp_max_2, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_7, -10), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_7, -10), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_7, 2), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_1 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -10.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, tanh, mul, x], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_mul_softplus_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_1, tanh_1, mul_1, x_1], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_1.run(buf5, primals_9, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_9 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 
0), buf7, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn from torch.distributions import Normal def mish(x): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function https://arxiv.org/abs/1908.08681v1 implemented for PyTorch / FastAI by lessw2020 https://github.com/lessw2020/mish param: x: output of a layer of a neural network return: mish activation function """ return torch.clamp(x * torch.tanh(F.softplus(x)), max=6) class PolicyNetwork(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003, log_std_min=-10, log_std_max=2): super(PolicyNetwork, self).__init__() self.log_std_min = log_std_min self.log_std_max = log_std_max self.linear1 = nn.Linear(num_inputs, hidden_size) self.linear1.weight.data.uniform_(-init_w, init_w) self.linear1.bias.data.uniform_(-init_w, init_w) self.linear2 = nn.Linear(hidden_size, hidden_size) self.linear2.weight.data.uniform_(-init_w, init_w) self.linear2.bias.data.uniform_(-init_w, init_w) self.mean_linear = nn.Linear(hidden_size, num_actions) self.mean_linear.weight.data.uniform_(-init_w, init_w) self.mean_linear.bias.data.uniform_(-init_w, init_w) self.log_std_linear = nn.Linear(hidden_size, num_actions) self.log_std_linear.weight.data.uniform_(-init_w, init_w) self.log_std_linear.bias.data.uniform_(-init_w, init_w) def forward(self, state): x = mish(self.linear1(state)) x = mish(self.linear2(x)) mean = self.mean_linear(x) log_std = self.log_std_linear(x) log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max) return mean, log_std def evaluate(self, state, epsilon=1e-06): mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.sample() action = torch.tanh(z) log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon) log_prob = log_prob.sum(-1, keepdim=True) return action, log_prob, z, mean, log_std def get_action(self, state, exploitation=False): state = torch.FloatTensor(state).unsqueeze(0) mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.sample() action = torch.tanh(z) if exploitation: action = torch.tanh(mean) action = action.detach().numpy() return action[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
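The clamp in mish above only changes the standard activation for large positive inputs, where x * tanh(softplus(x)) is approximately x; a tiny sketch:

import torch

x = torch.tensor([-2.0, 0.0, 1.0, 10.0])
print(mish(x))  # the last entry saturates at 6.0; the rest match plain mish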
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn from torch.distributions import Normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -10.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5, primals_9, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_9 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), buf7, primals_8, primals_6, primals_4 def mish(x): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function https://arxiv.org/abs/1908.08681v1 implemented for PyTorch / FastAI by lessw2020 https://github.com/lessw2020/mish param: x: output of a layer of a neural network return: mish activation function """ return torch.clamp(x * torch.tanh(F.softplus(x)), max=6) class PolicyNetworkNew(nn.Module): def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003, log_std_min=-10, log_std_max=2): super(PolicyNetworkNew, self).__init__() self.log_std_min = log_std_min self.log_std_max = log_std_max self.linear1 = nn.Linear(num_inputs, hidden_size) self.linear1.weight.data.uniform_(-init_w, init_w) self.linear1.bias.data.uniform_(-init_w, init_w) self.linear2 = nn.Linear(hidden_size, hidden_size) self.linear2.weight.data.uniform_(-init_w, init_w) self.linear2.bias.data.uniform_(-init_w, init_w) self.mean_linear = nn.Linear(hidden_size, num_actions) self.mean_linear.weight.data.uniform_(-init_w, init_w) self.mean_linear.bias.data.uniform_(-init_w, init_w) self.log_std_linear = nn.Linear(hidden_size, num_actions) self.log_std_linear.weight.data.uniform_(-init_w, init_w) self.log_std_linear.bias.data.uniform_(-init_w, init_w) def evaluate(self, state, epsilon=1e-06): mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.sample() action = torch.tanh(z) log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon) log_prob = log_prob.sum(-1, keepdim=True) return action, log_prob, z, mean, log_std def get_action(self, state, exploitation=False): state = torch.FloatTensor(state).unsqueeze(0) mean, log_std = self.forward(state) std = log_std.exp() normal = Normal(mean, std) z = normal.sample() action = torch.tanh(z) if exploitation: action = torch.tanh(mean) action = action.detach().numpy() return action[0] def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.mean_linear.weight primals_7 = self.mean_linear.bias primals_8 = self.log_std_linear.weight primals_9 = self.log_std_linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
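Usage sketch for the compiled policy (assumes a CUDA device). The compiled call() guards on the exact (4, 4, 4, 4) input shape from get_inputs(), so other shapes fail the size/stride asserts; the forward contract matches the reference PolicyNetwork, returning (mean, log_std):

import torch

pi = PolicyNetworkNew(num_inputs=4, num_actions=4, hidden_size=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')
mean, log_std = pi(state)
print(mean.shape, log_std.shape)  # torch.Size([4, 4, 4, 4]) for both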
Crawford-fang/ROS_pytorch_RL
PolicyNetwork
false
17,168
[ "Apache-2.0" ]
10
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
UnaryPrimitivesPredefined_v2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/g5/cg5itricuk36n25hqnqam7zr6ev2maqtyaq226ioova22and6nsp.py # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] # Source node to ATen node mapping: # padded_states => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %primals_1, %sub_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 6 x0 = xindex % 16 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 
& xmask, eviction_policy='evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tmp18 = tl.full([1], 6, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + (x3), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7k/c7kzgke7qljlvip3pfd6abfk45nesyqx4npxnxmrvhmdd2hmmj7i.py # Topologically Sorted Source Nodes: [padded_states_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # padded_states_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_3, %div, %sub_4], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 6 x0 = xindex % 16 x2 = (xindex // 96) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl.load(in_ptr0 + (48 + x0 + (16*x1) + (96*x2)), 
tmp4 & xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (16 + x0 + (16*x1) + (96*x2)), tmp4 & xmask, other=0.0) tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp8 tmp16 = tmp11 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + (32 + x0 + (16*((-1) + x1)) + (96*x2)), tmp22 & xmask, other=0.0) tmp24 = tl.load(in_ptr0 + (x0 + (16*((-1) + x1)) + (96*x2)), tmp22 & xmask, other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tmp25 * tmp8 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tmp0 >= tmp20 tmp30 = tl.full([1], 6, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tl.load(in_ptr0 + (80 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp33 = tl.load(in_ptr0 + (48 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp8 tmp36 = tmp35 * tmp10 tmp37 = tl.load(in_ptr0 + (64 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp38 = tl.load(in_ptr0 + (32 + x0 + (16*((-5) + x1)) + (96*x2)), tmp29 & xmask, other=0.0) tmp39 = tmp37 - tmp38 tmp40 = tmp39 * tmp8 tmp41 = tmp36 - tmp40 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp29, tmp41, tmp42) tmp44 = tl.where(tmp22, tmp28, tmp43) tmp45 = tl.where(tmp4, tmp18, tmp44) tl.store(out_ptr0 + (x3), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/z3/cz3pwn54uhea45tawvmat7z24kp3i7r3h6ipgb3wvdjezyxtspaj.py # Topologically Sorted Source Nodes: [sub_7, states_expand_1], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # states_expand_1 => div_2 # sub_7 => sub_7 # Graph fragment: # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %select_2), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_7, %select_3), kwargs = {}) triton_poi_fused_div_sub_2 = async_compile.triton('triton_poi_fused_div_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, 
in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 4 x2 = (xindex // 12) x3 = xindex tmp0 = tl.load(in_ptr0 + (36 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (37 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (5 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (38 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (6 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (39 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (7 + (4*x0) + (16*x1) + (96*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (0)) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr1 + (1)) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp27 = tmp24 - tmp26 tmp30 = tmp27 / tmp29 tl.store(in_out_ptr0 + (x3), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yt/cytkvmmxlhdqedwc6u4ud7tgyjuag5a6bpisop6vsocyyunstzkm.py # Topologically Sorted Source Nodes: [sub_9, sub_10, add, states_expand_3], Original ATen: [aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add # states_expand_3 => div_4 # sub_10 => sub_10 # sub_9 => sub_9 # Graph fragment: # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %select_4), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_5, %select_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_10, 1e-05), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_9, %add), kwargs = {}) triton_poi_fused_add_div_sub_3 = async_compile.triton('triton_poi_fused_add_div_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) x2 = xindex tmp0 = tl.load(in_ptr0 + (6 + (4*x0) + (16*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + (1)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp5 - tmp2 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/62/c62e35q3rdmqqq6s3qceox2fajhiggkqetzkvc3resgwfwiiq45u.py # Topologically Sorted Source Nodes: [sub_14, states_expand_7], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # states_expand_7 => div_8 # sub_14 => sub_14 # Graph fragment: # %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_8, %select_8), kwargs = {}) # %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_14, %select_9), kwargs = {}) triton_poi_fused_div_sub_4 = async_compile.triton('triton_poi_fused_div_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 4 x2 = (xindex // 12) x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + (4*x0) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((4*((1 + x0) // 
4)) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (5 + (4*x0) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*((1 + x0) // 4)) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (6 + (4*x0) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*((1 + x0) // 4)) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (7 + (4*x0) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*((1 + x0) // 4)) + (16*x1) + (16*((1 + x0) // 16)) + (64*x2) + (64*((1 + x0 + (16*x1)) // 64))), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (0)) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp23 = tl.load(in_ptr1 + (1)) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp22 = tmp19 - tmp21 tmp25 = tmp22 / tmp24 tl.store(in_out_ptr0 + (x3), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ge/cgewca5yn73hxjleyjbpgfrxiuip3spcg6pduarvudlprev3qlry.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] # Source node to ATen node mapping: # output => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sigmoid, %sigmoid_1, %sigmoid_2, %sigmoid_3], -1), kwargs = {}) triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 40 x1 = (xindex // 40) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = tl.sigmoid(tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 20, tl.int64) tmp15 = tmp0 < tmp14 tmp16 = tmp13 & tmp15 tmp17 = tl.load(in_ptr2 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr3 + ((-10) + x0), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp8 tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tmp0 >= tmp14 tmp25 = tl.full([1], 30, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tmp24 & tmp26 tmp28 = tl.load(in_ptr4 + (x1), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr5 + ((-20) + x0), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp28 - tmp29 tmp31 = tmp30 * tmp8 tmp32 = tl.sigmoid(tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp27, tmp32, tmp33) tmp35 = tmp0 >= tmp25 tmp36 = tl.full([1], 40, tl.int64) tmp37 = tmp0 < tmp36 tmp38 = tl.load(in_ptr6 + (x1), tmp35 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.load(in_ptr7 + ((-30) + x0), tmp35 & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tmp38 - tmp39 tmp41 = tmp40 * tmp8 tmp42 = tl.sigmoid(tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp35, tmp42, tmp43) tmp45 = tl.where(tmp27, tmp34, tmp44) tmp46 = tl.where(tmp16, tmp23, tmp45) tmp47 = tl.where(tmp4, tmp12, tmp46) tl.store(out_ptr0 + (x2), tmp47, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (10, ), (1, )) assert_size_stride(primals_4, (2, ), (1, )) assert_size_stride(primals_5, (10, ), (1, )) assert_size_stride(primals_6, (2, ), (1, )) assert_size_stride(primals_7, (10, ), (1, )) assert_size_stride(primals_8, (2, ), (1, )) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_states], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0) buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [padded_states_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf0, buf1, 384, grid=grid(384), stream=stream0) buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 3, 1), (12, 3, 1, 1), 0); del buf2 # reuse # Topologically Sorted 
Source Nodes: [sub_7, states_expand_1], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_2.run(buf3, buf1, primals_2, 48, grid=grid(48), stream=stream0) del buf1 del primals_2 buf4 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_9, sub_10, add, states_expand_3], Original ATen: [aten.sub, aten.add, aten.div] triton_poi_fused_add_div_sub_3.run(primals_1, primals_4, buf4, 48, grid=grid(48), stream=stream0) del primals_4 buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf6 = reinterpret_tensor(buf5, (4, 4, 3, 1), (12, 3, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [sub_12, states_expand_5], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_2.run(buf6, buf0, primals_6, 48, grid=grid(48), stream=stream0) del buf0 del primals_6 buf7 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf8 = reinterpret_tensor(buf7, (4, 4, 3, 1), (12, 3, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [sub_14, states_expand_7], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_4.run(buf8, primals_1, primals_8, 48, grid=grid(48), stream=stream0) del primals_1 del primals_8 buf9 = empty_strided_cuda((4, 4, 3, 40), (480, 120, 40, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat] triton_poi_fused_cat_5.run(buf3, primals_3, buf4, primals_5, buf6, primals_7, buf8, primals_9, buf9, 1920, grid=grid(1920), stream=stream0) return (buf9, primals_3, primals_5, primals_7, primals_9, buf3, buf4, buf6, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
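# Reading note (hedged): triton_poi_fused_cat_0 above materializes the
# padded sequence that AlignDifferential -- defined in the reference module
# below -- builds before taking a central difference. An eager equivalent of
# the kernel's three index branches (x1 == 0, 1 <= x1 < 5, x1 == 5) is
# sketched here; the helper name is illustrative, not from the source.
import torch

def pad_with_linear_extrapolation(states):
    # states: [batch, length, ...]; linearly extrapolated endpoints make the
    # central difference at the boundary collapse to a one-sided difference,
    # e.g. (padded[:, 2] - padded[:, 0]) / 2 == states[:, 1] - states[:, 0].
    return torch.cat([states[:, 0:1] * 2 - states[:, 1:2],
                      states,
                      states[:, -1:] * 2 - states[:, -2:-1]], dim=1)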
import math import torch from torch import nn def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): 
super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... ] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class UnaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball'] self.distance = Distance() self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution= 'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution= 'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution= 'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, state_dim] return [batch, length, n_agents, out_dim] """ velocity = self.differential(states) acc = self.differential(velocity) n_agents = states.size(2) p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1) p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1) dist = self.distance(p1, p2).squeeze(4) ineqs_inputs = {'pos_z': states[:, :, 1:, 2], 'speed': torch.norm( velocity[:, :, 1:, :], p=2, dim=3), 'acc': torch.norm(acc[:, :, 1:, :], p=2, dim=3), 'dist_to_ball': dist[:, :, 0, 1:]} output = 
torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in self.primitive_list], dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
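# Usage sketch (not part of the source repo): running the eager module on the
# sample input from get_inputs(). With cmp_dim=10 and n_agents=4, each of the
# four primitives contributes 10 features for the 3 non-ball agents (agent 0
# is treated as the ball by dist_to_ball), so out_dim = 40.
import torch

model = UnaryPrimitivesPredefined_v2(cmp_dim=10)
states, = get_inputs()            # [batch=4, length=4, n_agents=4, state_dim=4]
out = model(states, beta=0)
print(out.shape)                  # torch.Size([4, 4, 3, 40])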
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 - tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 5, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 & xmask, other=0.0) tmp17 = tmp0 >= tmp13 tl.full([1], 6, tl.int64) tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 * tmp6 tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 - tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp17, tmp23, tmp24) tmp26 = tl.where(tmp15, tmp16, tmp25) tmp27 = tl.where(tmp4, tmp11, tmp26) tl.store(out_ptr0 + x3, tmp27, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl.load(in_ptr0 + (48 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (16 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0) tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp8 tmp16 = tmp11 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 96 * x2), tmp22 & xmask, other=0.0) tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 96 * x2), tmp22 & xmask, other=0.0) tmp25 = tmp23 - tmp24 tmp26 = tmp25 * tmp8 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp22, tmp26, tmp27) tmp29 = tmp0 >= tmp20 tl.full([1], 6, tl.int64) tmp32 = tl.load(in_ptr0 + (80 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp33 = tl.load(in_ptr0 + (48 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp8 tmp36 = tmp35 * tmp10 tmp37 = tl.load(in_ptr0 + (64 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp38 = 
tl.load(in_ptr0 + (32 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 & xmask, other=0.0) tmp39 = tmp37 - tmp38 tmp40 = tmp39 * tmp8 tmp41 = tmp36 - tmp40 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp29, tmp41, tmp42) tmp44 = tl.where(tmp22, tmp28, tmp43) tmp45 = tl.where(tmp4, tmp18, tmp44) tl.store(out_ptr0 + x3, tmp45, xmask) @triton.jit def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 4 x2 = xindex // 12 x3 = xindex tmp0 = tl.load(in_ptr0 + (36 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (37 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (38 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (39 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 96 * x2), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + 0) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr1 + 1) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp27 = tmp24 - tmp26 tmp30 = tmp27 / tmp29 tl.store(in_out_ptr0 + x3, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1), xmask, eviction_policy ='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + 1) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp3 = tmp0 - tmp2 tmp6 = tmp5 - tmp2 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_div_sub_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 4 x2 = xindex // 12 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0 ) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (6 + 
4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (3 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + 0) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp23 = tl.load(in_ptr1 + 1) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp22 = tmp19 - tmp21 tmp25 = tmp22 / tmp24 tl.store(in_out_ptr0 + x3, tmp25, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 40 x1 = xindex // 40 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 10, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = tl.sigmoid(tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 20, tl.int64) tmp15 = tmp0 < tmp14 tmp16 = tmp13 & tmp15 tmp17 = tl.load(in_ptr2 + x1, tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr3 + (-10 + x0), tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp8 tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tmp0 >= tmp14 tmp25 = tl.full([1], 30, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tmp24 & tmp26 tmp28 = tl.load(in_ptr4 + x1, tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp29 = tl.load(in_ptr5 + (-20 + x0), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp28 - tmp29 tmp31 = tmp30 * tmp8 tmp32 = tl.sigmoid(tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp27, tmp32, tmp33) tmp35 = tmp0 >= tmp25 tl.full([1], 40, tl.int64) tmp38 = tl.load(in_ptr6 + x1, tmp35 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tl.load(in_ptr7 + (-30 + x0), tmp35 & xmask, eviction_policy= 'evict_last', other=0.0) tmp40 = tmp38 - tmp39 tmp41 = tmp40 * tmp8 tmp42 = tl.sigmoid(tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp35, tmp42, tmp43) tmp45 = tl.where(tmp27, tmp34, tmp44) tmp46 = tl.where(tmp16, tmp23, tmp45) tmp47 = tl.where(tmp4, tmp12, tmp46) tl.store(out_ptr0 + x2, tmp47, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (2,), 
(1,)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (2,), (1,)) assert_size_stride(primals_7, (10,), (1,)) assert_size_stride(primals_8, (2,), (1,)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(384)](buf0, buf1, 384, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 3, 1), (12, 3, 1, 1), 0) del buf2 triton_poi_fused_div_sub_2[grid(48)](buf3, buf1, primals_2, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_2 buf4 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32) triton_poi_fused_add_div_sub_3[grid(48)](primals_1, primals_4, buf4, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf6 = reinterpret_tensor(buf5, (4, 4, 3, 1), (12, 3, 1, 1), 0) del buf5 triton_poi_fused_div_sub_2[grid(48)](buf6, buf0, primals_6, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_6 buf7 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32) buf8 = reinterpret_tensor(buf7, (4, 4, 3, 1), (12, 3, 1, 1), 0) del buf7 triton_poi_fused_div_sub_4[grid(48)](buf8, primals_1, primals_8, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_8 buf9 = empty_strided_cuda((4, 4, 3, 40), (480, 120, 40, 1), torch. float32) triton_poi_fused_cat_5[grid(1920)](buf3, primals_3, buf4, primals_5, buf6, primals_7, buf8, primals_9, buf9, 1920, XBLOCK=128, num_warps=4, num_stages=1) return (buf9, primals_3, primals_5, primals_7, primals_9, buf3, buf4, buf6, buf8) def apply_last_dim(model, x): size = list(x.size()) y = model(x.contiguous().view(-1, size[-1])) size[-1] = y.size(-1) y = y.view(torch.Size(size)) return y def get_int_dim_index(name): if isinstance(name, int): return name name_list = 'axyz' assert name in name_list return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1 class Length(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index def forward(self, states, dim_index=None): if dim_index is None: dim_index = self.dim_index if isinstance(dim_index, int): dim_index = [dim_index] else: dim_index = [get_int_dim_index(x) for x in dim_index] if -1 in dim_index: def extractor(x): return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) else: def extractor(x): return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1, keepdim=True)) return apply_last_dim(extractor, states) def show(self, name='Length', indent=0, log=print, **kwargs): log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self. 
dim_index))) class Distance(nn.Module): def __init__(self, dim_index=-1): super().__init__() self.dim_index = dim_index self.length = Length(dim_index) def forward(self, states1, states2, dim_index=None): return self.length(states1 - states2, dim_index) def show(self, name='Distance', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name) class Normalize(nn.Module): def __init__(self, distribution=None, **kwargs): super().__init__() self.distribution = distribution self.data_ = [] if distribution is None: pass elif distribution == 'normal': mean = kwargs['mean'] if 'mean' in kwargs else 0 std = kwargs['std'] if 'std' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([mean, std]), False) elif distribution == 'uniform': vmin = kwargs['minv'] if 'minv' in kwargs else 0 vmax = kwargs['maxv'] if 'maxv' in kwargs else 1 self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False) else: raise NotImplementedError() def forward(self, x, keep_data=False): if keep_data: self.data_.append(x.detach().cpu().view(-1)) return x if self.distribution is None: return x elif self.distribution == 'normal': mean = self.param[0] std = self.param[1] return (x - mean) / std elif self.distribution == 'uniform': vmin = self.param[0] vmax = self.param[1] return (x - vmin) / (vmax - vmin + 1e-05) else: raise NotImplementedError() def reset_parameters(self, name=None): assert len(self.data_) > 0 data = torch.cat(self.data_) self.data_ = [] if self.distribution is None: pass elif self.distribution == 'normal': with torch.no_grad(): self.param[0] = data.mean().item() self.param[1] = data.std().item() if name is not None: None elif self.distribution == 'uniform': with torch.no_grad(): self.param[0] = data.min().item() self.param[1] = data.max().item() if name is not None: None else: raise NotImplementedError() def recover_threshold(self, x): if self.distribution is None: return x elif self.distribution == 'normal': return x * float(self.param[1]) + float(self.param[0]) elif self.distribution == 'uniform': return x * float(self.param[1] - self.param[0] + 1e-05) + float( self.param[0]) else: raise NotImplementedError() def init_thresholds(self, x): if self.distribution is None: nn.init.normal_(x, 0, 1) elif self.distribution == 'normal': nn.init.normal_(x, 0, 1) elif self.distribution == 'uniform': nn.init.uniform_(x, 0, 1) else: raise NotImplementedError() class SoftCmp(nn.Module): """ Sigmoid((x - y) / e^beta) """ def __init__(self): super().__init__() self.sigmoid = nn.Sigmoid() def forward(self, x, y, beta): return self.sigmoid((x - y) / math.exp(beta)) class Inequality(nn.Module): def __init__(self, out_dim=1, distribution=None, **kwargs): super().__init__() self.out_dim = out_dim self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True ) self.distribution = distribution self.normalize = Normalize(distribution) self.cmp = SoftCmp() self.normalize.init_thresholds(self.thresholds) def forward(self, states, beta=0, **kwargs): """ :param states: [batch, length, n_agents, ... 
] """ states_expand = states.view(*(states.size() + (1,))) estimate_parameters = 'estimate_parameters' in kwargs and kwargs[ 'estimate_parameters'] states_expand = self.normalize(states_expand, keep_data= estimate_parameters) return self.cmp(states_expand, self.thresholds.view(*([1] * len( states.size()) + [self.out_dim])), beta) def reset_parameters(self, parameter_name, name=None): if parameter_name == 'primitive_inequality': self.normalize.reset_parameters(name=name) self.normalize.init_thresholds(self.thresholds) def get_descriptions(self, name='Inequality'): theta = self.thresholds.detach().cpu().view(self.out_dim) descroptions = [] for k in range(theta.size(0)): t = self.normalize.recover_threshold(theta[k]) if 'speed' in name: t = t * 8 if 'acc' in name: t = t * 64 descroptions.append('%s > %.2lf' % (name, t)) return descroptions class N_aryPrimitivesPredefined(nn.Module): def __init__(self): super().__init__() self.out_dim = 0 self.primitive_list = [] self.ineqs = nn.ModuleDict({}) def reset_parameters(self, parameter_name): for k in self.primitive_list: self.ineqs[k].reset_parameters(parameter_name, name=k) def get_descriptions(self): descriptions = [] for k in self.primitive_list: descriptions += self.ineqs[k].get_descriptions(name=k) return descriptions class AlignDifferential(nn.Module): def __init__(self): super().__init__() def new_length(self, length): return length def forward(self, states): """ :param states: [batch, length, *] """ padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2], states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1) return (padded_states[:, 2:] - padded_states[:, :-2]) / 2 def show(self, name='AlignDifferential', indent=0, log=print, **kwargs): log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,)) class UnaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined): def __init__(self, cmp_dim=10): super().__init__() self.differential = AlignDifferential() self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball'] self.distance = Distance() self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution= 'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution= 'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution= 'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim, distribution='normal')}) self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list] ) def forward(self, input_0): primals_3 = self.ineqs.acc.thresholds primals_2 = self.ineqs.acc.normalize.param primals_5 = self.ineqs.pos_z.thresholds primals_4 = self.ineqs.pos_z.normalize.param primals_7 = self.ineqs.speed.thresholds primals_6 = self.ineqs.speed.normalize.param primals_9 = self.ineqs.dist_to_ball.thresholds primals_8 = self.ineqs.dist_to_ball.normalize.param primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
C-SUNSHINE/TOQ-Nets-PyTorch-Release
UnaryPrimitivesPredefined_v2
false
17169
[ "MIT" ]
6
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
SELayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yg/cygooswl5gkxugqq2ejgag2vtcqhtumn2j3notsgzty3xoxbrq4v.py # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex 
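    # Persistent reduction: each program sums one 4x4 spatial plane
    # (rnumel = 16) and divides by 16.0, i.e. adaptive_avg_pool2d to 1x1.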
x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xr/cxrbkvclbwezn4blkxettbfalyh5q4bsuxhlcnje4eedoho3alx4.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # y_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/bf/cbfuttf7dazvdy6urg5w4rfzlxttv7rtpxx6xd57dlwpqas7kmzc.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F


class SELayer(nn.Module):

    def __init__(self, in_channels, reduction):
        super(SELayer, self).__init__()
        mid_channels = in_channels // reduction
        self.fc1 = nn.Linear(in_channels, mid_channels)
        self.fc2 = nn.Linear(mid_channels, in_channels)

    def forward(self, x):
        n_batches, n_channels, _, _ = x.size()
        # Squeeze: global average pool to one value per channel.
        y = F.adaptive_avg_pool2d(x, output_size=1).view(n_batches, n_channels)
        # Excitation: bottleneck MLP ending in a per-channel sigmoid gate.
        y = F.relu(self.fc1(y), inplace=True)
        y = torch.sigmoid(self.fc2(y)).view(n_batches, n_channels, 1, 1)
        # Rescale the input feature map channel-wise.
        return x * y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'reduction': 4}]
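# A minimal smoke test for the eager SELayer above (the test itself is a
# sketch, not repo code): the gate is a sigmoid in (0, 1), so for the
# non-negative rand() input the output can only shrink each element.
import torch

se = SELayer(in_channels=4, reduction=4)
x = torch.rand(4, 4, 4, 4)
y = se(x)
assert y.shape == x.shape
assert (y <= x + 1e-6).all()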
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.optim import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), buf3, buf4, primals_4 class SELayerNew(nn.Module): def __init__(self, in_channels, reduction): super(SELayerNew, self).__init__() 
mid_channels = in_channels // reduction self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, in_channels) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
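# Hypothetical parity check between the eager SELayer and the compiled
# SELayerNew (assumes a CUDA device, since call() pins every buffer to
# cuda:0); the variable names here are ours, not the repo's.
import torch

ref = SELayer(in_channels=4, reduction=4).cuda()
opt = SELayerNew(in_channels=4, reduction=4).cuda()
opt.load_state_dict(ref.state_dict())  # same fc1/fc2 weights in both
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(opt(x), ref(x))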
CrazyStoneonRoad/pytorch_image_classification
SELayer
false
17,170
[ "MIT" ]
4
1dcf6d0ee8f4a102ca93cc6e5e325a2e9153918b
https://github.com/CrazyStoneonRoad/pytorch_image_classification/tree/1dcf6d0ee8f4a102ca93cc6e5e325a2e9153918b
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fa/cfathejnvzkx3qrb4rznotdd3umxo2wf7gor3e4eyd2r2psxfspb.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_6, 0.7071067811865476), kwargs = {}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x4 = xindex tmp0 = x3 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*(x1 + (4*x2)))), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + (4*x0) + (16*((-4) + x1 + (4*x2)))), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + (4*x0) + (16*((-8) + x1 + (4*x2)))), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (3 + (4*x0) + (16*((-12) + x1 + (4*x2)))), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = 0.7071067811865476 tmp24 = tmp22 * tmp23 tl.store(out_ptr0 + (x4), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xv/cxv5zlyqldhwuctiaey5xrrtrmgro2ckmgkb3xaym5udlyzstvai.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, 
tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jt/cjteqzpjjfp5f57sg6ohk5xnzwbndntoiin2wxevaquyjslzne6f.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/rs/crscuws4z5axp2mdb5pzyrvpd7krd3gem3zbu63rc6nn2k7gc33b.py # Topologically Sorted Source Nodes: [values_1], Original ATen: [aten.stack] # Source node to ATen node mapping: # values_1 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_8, %getitem_9, %getitem_10, %getitem_11],), kwargs = {}) triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + (4*x0) + (16*((-4) + x1))), tmp9 & xmask, eviction_policy='evict_last', 
other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + (4*x0) + (16*((-8) + x1))), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (3 + (4*x0) + (16*((-12) + x1))), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vw/cvw7cind7evdema3nchj6bnko225byppv5y4ilfz7jc6qzp55czn.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_12, %getitem_13, %getitem_14, %getitem_15], 3), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (32 + x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (48 + x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) 
tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [querys], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [keys], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [values], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, buf3, 64, grid=grid(64), stream=stream0) buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [values_1], Original ATen: [aten.stack] triton_poi_fused_stack_3.run(buf2, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(buf9, buf10, 64, grid=grid(64), stream=stream0) del buf9 return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F


class MultiHeadAttention(nn.Module):
    """
    input:
        query --- [N, T_q, query_dim]
        key --- [N, T_k, key_dim]
    output:
        out --- [N, T_q, num_units]
    """

    def __init__(self, query_dim, key_dim, num_units, num_heads):
        super().__init__()
        self.num_units = num_units
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.W_query = nn.Linear(in_features=query_dim,
            out_features=num_units, bias=False)
        self.W_key = nn.Linear(in_features=key_dim,
            out_features=num_units, bias=False)
        self.W_value = nn.Linear(in_features=key_dim,
            out_features=num_units, bias=False)

    def forward(self, query, key):
        querys = self.W_query(query)
        keys = self.W_key(key)
        values = self.W_value(key)
        # Split the feature axis into heads and stack them on a new leading
        # dimension: [num_heads, N, T, num_units // num_heads].
        split_size = self.num_units // self.num_heads
        querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0)
        keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0)
        values = torch.stack(torch.split(values, split_size, dim=2), dim=0)
        # Scaled dot-product attention, computed per head.
        scores = torch.matmul(querys, keys.transpose(2, 3))
        scores = scores / self.key_dim ** 0.5
        scores = F.softmax(scores, dim=3)
        out = torch.matmul(scores, values)
        # Concatenate the heads back along the feature axis.
        out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'query_dim': 4, 'key_dim': 4, 'num_units': 4,
        'num_heads': 4}]
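# Shape walkthrough for the eager module above (illustrative values only):
# with num_units == num_heads == 4, split_size is 1, so each head attends
# over a single feature.
import torch

mha = MultiHeadAttention(query_dim=4, key_dim=4, num_units=4, num_heads=4)
q = torch.rand(4, 4, 4)   # [N, T_q, query_dim]
k = torch.rand(4, 4, 4)   # [N, T_k, key_dim]
out = mha(q, k)
assert out.shape == (4, 4, 4)  # [N, T_q, num_units]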
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x4 = xindex tmp0 = x3 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * (x1 + 4 * x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1 + 4 * x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1 + 4 * x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1 + 4 * x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = 0.7071067811865476 tmp24 = tmp22 * tmp23 tl.store(out_ptr0 + x4, tmp24, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 
float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x1), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (32 + x1), tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 4, tl.int64) tmp19 = tl.load(in_ptr0 + (48 + x1), tmp16 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(64)](buf1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 triton_poi_fused_stack_3[grid(64)](buf2, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_cat_4[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadAttentionNew(nn.Module): """ input: query --- [N, T_q, query_dim] key --- [N, T_k, key_dim] output: out --- [N, T_q, num_units] """ def __init__(self, query_dim, key_dim, num_units, num_heads): super().__init__() self.num_units = num_units self.num_heads = num_heads self.key_dim = key_dim self.W_query = nn.Linear(in_features=query_dim, out_features= num_units, bias=False) self.W_key = nn.Linear(in_features=key_dim, out_features=num_units, bias=False) self.W_value = nn.Linear(in_features=key_dim, out_features= num_units, bias=False) def forward(self, input_0, input_1): primals_1 = self.W_query.weight primals_3 = self.W_key.weight primals_5 = self.W_value.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
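# An observation about the generated kernels above, checked numerically:
# triton_poi_fused_0 scales both the projected queries and keys by
# 0.7071067811865476, so the q @ k^T scores pick up a factor of
# (1/sqrt(2))**2 == 1/2, which is exactly 1 / key_dim ** 0.5 for key_dim=4.
import math

assert math.isclose(0.7071067811865476 ** 2, 1 / 4 ** 0.5)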
CookiePPP/pag-tacotron2
MultiHeadAttention
false
17,171
[ "BSD-3-Clause" ]
10
503e7e9e892c5c0795f6278e70e72b627ed1cfb7
https://github.com/CookiePPP/pag-tacotron2/tree/503e7e9e892c5c0795f6278e70e72b627ed1cfb7
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ht/chtt7si4fmqqg36tsos7mb7lazazfio5ctmjcpq6wk6fb2dyz6gx.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] # Source node to ATen node mapping: # out_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [seq_fts], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1) del buf0 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 return (buf2, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class GCN(nn.Module):

    def __init__(self, in_ft, out_ft, act, bias=True):
        super(GCN, self).__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        # Note: self.act is constructed here but never applied in forward.
        self.act = nn.PReLU() if act == 'prelu' else act
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        else:
            self.register_parameter('bias', None)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, seq, adj, sparse=False):
        # Feature transform first, then neighborhood aggregation through adj.
        seq_fts = self.fc(seq)
        if sparse:
            out = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq_fts,
                0)), 0)
        else:
            out = torch.bmm(adj, seq_fts)
        if self.bias is not None:
            out += self.bias
        return out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ft': 4, 'out_ft': 4, 'act': 4}]
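# The dense path of the eager GCN above is just adj @ (seq @ W.T) + bias; a
# hypothetical cross-check with plain tensor ops (our names, not the repo's):
import torch

gcn = GCN(in_ft=4, out_ft=4, act='prelu')
seq = torch.rand(4, 4, 4)
adj = torch.rand(4, 4, 4)
expected = torch.bmm(adj, seq @ gcn.fc.weight.t()) + gcn.bias
torch.testing.assert_close(gcn(seq, adj), expected)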
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 4, 1), 0), out=buf1) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 return buf2, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0) class GCNNew(nn.Module): def __init__(self, in_ft, out_ft, act, bias=True): super(GCNNew, self).__init__() self.fc = nn.Linear(in_ft, out_ft, bias=False) self.act = nn.PReLU() if act == 'prelu' else act if bias: self.bias = nn.Parameter(torch.FloatTensor(out_ft)) self.bias.data.fill_(0.0) else: self.register_parameter('bias', None) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Linear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1): primals_4 = self.bias primals_1 = self.fc.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
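# What call() hands back besides the layer output (a sketch; assumes a CUDA
# device, since call() moves everything to device 0): index 0 is the
# (4, 4, 4) result, the remaining entries are input views saved for the
# backward pass.
import torch

w = torch.rand(4, 4, device='cuda')
seq = torch.rand(4, 4, 4, device='cuda')
adj = torch.rand(4, 4, 4, device='cuda')
bias = torch.zeros(4, device='cuda')
outs = call([w, seq, adj, bias])
assert outs[0].shape == (4, 4, 4) and len(outs) == 3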
CrowdDynamicsLab/InfoMotif
GCN
false
17,172
[ "BSD-3-Clause" ]
7
cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
https://github.com/CrowdDynamicsLab/InfoMotif/tree/cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
DQN_hot4
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/k3/ck3ieblgb2bkhcothrorqkt6ebrp6rlmfh7d4mncinldyos5gvuj.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/py/cpylknnz3x75fybmwuti4atvpcztb3abornojeivpq6i6fxb7zfh.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 100 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 25 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (100, 16), (16, 1)) assert_size_stride(primals_3, (100, ), (1, )) assert_size_stride(primals_4, (25, 100), (100, 1)) assert_size_stride(primals_5, (25, ), (1, )) assert_size_stride(primals_6, (4, 25), (25, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], 
Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 400, grid=grid(400), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (100, 25), (1, 100), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 100, grid=grid(100), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (25, 4), (1, 25), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((25, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 25), (25, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


class DQN_hot4(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot4, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, 25)
        self.fc3 = nn.Linear(25, num_actions)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
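The get_inputs/get_init_inputs pair above is the harness convention these records share: get_init_inputs() yields constructor args and kwargs, get_inputs() yields example forward inputs. A minimal sketch of driving the module through that convention follows; the driver itself is an assumption, not part of the record, and it assumes DQN_hot4 and both helpers are in scope:

    import torch

    # Hypothetical driver for the record's harness convention.
    args, kwargs = get_init_inputs()   # [], {'m': 4, 'n': 4, 'num_actions': 4}
    model = DQN_hot4(*args, **kwargs)
    out = model(*get_inputs())         # input: torch.rand([4, 4, 4])
    print(out.shape)                   # torch.Size([4, 4]), one Q-value per action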
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 100 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 25 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (100, 16), (16, 1)) assert_size_stride(primals_3, (100,), (1,)) assert_size_stride(primals_4, (25, 100), (100, 1)) assert_size_stride(primals_5, (25,), (1,)) assert_size_stride(primals_6, (4, 25), (25, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (100, 25), (1, 100), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(100)](buf3, primals_5, 100, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (25, 4), (1, 25), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), buf1, buf3, primals_6, primals_4 class DQN_hot4New(nn.Module): """ A MLP for DQN learning. Note: Uses a one hot board representation """ def __init__(self, m, n, num_actions): super(DQN_hot4New, self).__init__() self.fc1 = nn.Linear(m * n, 100) self.fc2 = nn.Linear(100, 25) self.fc3 = nn.Linear(25, num_actions) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
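A hedged way to sanity-check the rewritten wrapper against the eager module is to share weights and compare outputs. The sketch below assumes DQN_hot4 and DQN_hot4New from this record are in scope and that a CUDA device is available (call() allocates with empty_strided_cuda and launches Triton kernels, so it is CUDA-only):

    import torch

    torch.manual_seed(0)
    eager = DQN_hot4(4, 4, 4).cuda()
    compiled = DQN_hot4New(4, 4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # identical fc1/fc2/fc3 weights

    x = torch.rand(4, 4, 4, device='cuda')
    with torch.no_grad():  # call() uses out= kernels, so run without autograd
        ref = eager(x)
        out = compiled(x)
    print(torch.allclose(ref, out, atol=1e-5))  # expected: True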
CoAxLab/azad
DQN_hot4
false
17173
[ "MIT" ]
6
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
DQN_hot1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fr/cfrcknkyfh5rquxttfl3rbvjsv4zqh6qrnfa47qwp56kunhvqjmm.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 60 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 15 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (15, 16), (16, 1)) assert_size_stride(primals_3, (15, ), (1, )) assert_size_stride(primals_4, (4, 15), (15, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 15), (15, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 15), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 60, grid=grid(60), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (15, 4), (1, 15), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((15, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((15, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 15), (15, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


class DQN_hot1(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot1, self).__init__()
        self.fc1 = nn.Linear(m * n, 15)
        self.fc2 = nn.Linear(15, num_actions)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 60 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 15 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (15, 16), (16, 1)) assert_size_stride(primals_3, (15,), (1,)) assert_size_stride(primals_4, (4, 15), (15, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 15), (15, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), reinterpret_tensor(primals_2, (16, 15), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(60)](buf1, primals_3, 60, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (15, 4), (1, 15), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), buf1, primals_4 class DQN_hot1New(nn.Module): """ A MLP for DQN learning. Note: Uses a one hot board representation """ def __init__(self, m, n, num_actions): super(DQN_hot1New, self).__init__() self.fc1 = nn.Linear(m * n, 15) self.fc2 = nn.Linear(15, num_actions) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
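Every DQN record here fuses the linear layer's bias add and ReLU into one in-place pointwise kernel shaped like triton_poi_fused_relu_0. Below is a self-contained sketch of that pattern written against the public Triton API rather than Inductor's generated scaffolding; the (4, 15) shape and XBLOCK=64 are illustrative assumptions:

    import torch
    import triton
    import triton.language as tl

    @triton.jit
    def fused_bias_relu(in_out_ptr, bias_ptr, xnumel, BIAS_N: tl.constexpr,
                        XBLOCK: tl.constexpr):
        # One 1-D block of threads; xmask guards the ragged tail, exactly as
        # in the generated kernels above.
        xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
        xmask = xindex < xnumel
        x = tl.load(in_out_ptr + xindex, mask=xmask)
        b = tl.load(bias_ptr + xindex % BIAS_N, mask=xmask,
                    eviction_policy='evict_last')  # bias reused across rows
        tl.store(in_out_ptr + xindex, tl.maximum(x + b, 0.0), mask=xmask)

    x = torch.randn(4, 15, device='cuda')  # shapes are assumptions
    bias = torch.randn(15, device='cuda')
    ref = torch.relu(x + bias)
    fused_bias_relu[(triton.cdiv(x.numel(), 64),)](x, bias, x.numel(),
                                                   BIAS_N=15, XBLOCK=64)
    assert torch.allclose(x, ref)  # kernel mutates x in place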
CoAxLab/azad
DQN_hot1
false
17174
[ "MIT" ]
6
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
SoftQNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5e/c5ef2tqg4j6cvq7h7ao5uawaaehav6cuifera3ros2yzjrgdgg6w.py # Topologically Sorted Source Nodes: [softplus, tanh, mul, x_1], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] # Source node to ATen node mapping: # mul => mul # softplus => exp, gt, log1p, where # tanh => tanh # x_1 => clamp_max # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%addmm,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %addmm, %log1p), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, %tanh), kwargs = {}) # %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%mul, 6), kwargs = {}) triton_poi_fused_clamp_mul_softplus_tanh_1 = async_compile.triton('triton_poi_fused_clamp_mul_softplus_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_mul_softplus_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_mul_softplus_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tmp8 = 6.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) 
del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, tanh, mul, x_1], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_1, tanh_1, mul_1, x_2], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus_2, tanh_2, mul_2, x_3], Original ATen: [aten.softplus, aten.tanh, aten.mul, aten.clamp] triton_poi_fused_clamp_mul_softplus_tanh_1.run(buf5, buf6, 16, grid=grid(16), stream=stream0) buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 return (buf8, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_9, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish
    param: x: output of a layer of a neural network
    return: mish activation function
    """
    return torch.clamp(x * torch.tanh(F.softplus(x)), max=6)


class SoftQNetwork(nn.Module):

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003):
        super(SoftQNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear2_3 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        self.linear1.weight.data.uniform_(-init_w, init_w)
        self.linear1.bias.data.uniform_(-init_w, init_w)
        self.linear2.weight.data.uniform_(-init_w, init_w)
        self.linear2.bias.data.uniform_(-init_w, init_w)
        self.linear2_3.weight.data.uniform_(-init_w, init_w)
        self.linear2_3.bias.data.uniform_(-init_w, init_w)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = mish(self.linear1(x))
        x = mish(self.linear2(x))
        x = mish(self.linear2_3(x))
        x = self.linear3(x)
        return x


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
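The fused kernel for this record (triton_poi_fused_clamp_mul_softplus_tanh_1) evaluates softplus with the same threshold trick F.softplus uses by default: for inputs above 20 it returns the input unchanged instead of log1p(exp(x)), whose exp would overflow. A small PyTorch-only sketch of that equivalence (the test range is an arbitrary assumption):

    import torch
    import torch.nn.functional as F

    def mish_kernel_reference(x):
        # Mirrors the fused kernel: stable softplus, tanh, multiply, clamp at 6.
        sp = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
        return torch.clamp(x * torch.tanh(sp), max=6.0)

    x = torch.linspace(-30.0, 30.0, steps=1001)
    ref = torch.clamp(x * torch.tanh(F.softplus(x)), max=6)  # mish() above
    assert torch.allclose(mish_kernel_reference(x), ref)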
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_clamp_mul_softplus_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tmp8 = 6.0
    tmp9 = triton_helpers.minimum(tmp7, tmp8)
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (1, 4), (4, 1))
    assert_size_stride(primals_10, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf1, buf2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_6
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf3, buf4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del primals_8
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf5, buf6, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8)
        del primals_10
    return (buf8, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_9,
        primals_7, primals_5)


def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish
    param: x: output of a layer of a neural network
    return: mish activation function
    """
    return torch.clamp(x * torch.tanh(F.softplus(x)), max=6)


class SoftQNetworkNew(nn.Module):

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003):
        super(SoftQNetworkNew, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear2_3 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        self.linear1.weight.data.uniform_(-init_w, init_w)
        self.linear1.bias.data.uniform_(-init_w, init_w)
        self.linear2.weight.data.uniform_(-init_w, init_w)
        self.linear2.bias.data.uniform_(-init_w, init_w)
        self.linear2_3.weight.data.uniform_(-init_w, init_w)
        self.linear2_3.bias.data.uniform_(-init_w, init_w)
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, input_0, input_1):
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.linear1.weight
        primals_4 = self.linear1.bias
        primals_5 = self.linear2.weight
        primals_6 = self.linear2.bias
        primals_7 = self.linear2_3.weight
        primals_8 = self.linear2_3.bias
        primals_9 = self.linear3.weight
        primals_10 = self.linear3.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
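As with the DQN records, the critic can be checked by sharing weights between the eager and rewritten modules; note the two-tensor forward signature (state, action). The sketch assumes both classes from this record are in scope and CUDA is available:

    import torch

    torch.manual_seed(0)
    eager = SoftQNetwork(num_inputs=4, num_actions=4, hidden_size=4).cuda()
    compiled = SoftQNetworkNew(num_inputs=4, num_actions=4, hidden_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # sync the uniform-init weights

    state = torch.rand(4, 4, device='cuda')
    action = torch.rand(4, 4, device='cuda')
    with torch.no_grad():
        print(torch.allclose(eager(state, action), compiled(state, action),
                             atol=1e-5))  # expected: True with matching weights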
Crawford-fang/ROS_pytorch_RL
SoftQNetwork
false
17175
[ "Apache-2.0" ]
10
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
DQN_hot3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/st/cstx2nhbqtbmzgcl5qm7pxb6f4supwlpd6y4wd6bhltxrr4umr3x.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/k2/ck2lbvxtiixjhdbbkn4q7rlqydzzqxepcyq2tnamlrcu74lgvjrd.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (10, 16), (16, 1)) assert_size_stride(primals_3, (10, ), (1, )) assert_size_stride(primals_4, (20, 10), (10, 1)) assert_size_stride(primals_5, (20, ), (1, )) assert_size_stride(primals_6, (4, 20), (20, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 10), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original 
ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 40, grid=grid(40), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (10, 20), (1, 10), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 80, grid=grid(80), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


class DQN_hot3(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot3, self).__init__()
        self.fc1 = nn.Linear(m * n, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, num_actions)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (10, 16), (16, 1)) assert_size_stride(primals_3, (10,), (1,)) assert_size_stride(primals_4, (20, 10), (10, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (4, 20), (20, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), reinterpret_tensor(primals_2, (16, 10), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(40)](buf1, primals_3, 40, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (10, 20), (1, 10), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(80)](buf3, primals_5, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), buf1, buf3, primals_6, primals_4 class DQN_hot3New(nn.Module): """ A MLP for DQN learning. Note: Uses a one hot board representation """ def __init__(self, m, n, num_actions): super(DQN_hot3New, self).__init__() self.fc1 = nn.Linear(m * n, 10) self.fc2 = nn.Linear(10, 20) self.fc3 = nn.Linear(20, num_actions) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
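Throughout these call() wrappers, the eager modules' x.view(x.size(0), -1) is compiled into reinterpret_tensor(primals_1, (4, 16), (16, 1), 0): a zero-copy restride of the contiguous (4, 4, 4) input. A short sketch of that equivalence in plain PyTorch, where the data_ptr comparison shows no copy is made:

    import torch

    x = torch.rand(4, 4, 4)                  # contiguous: strides (16, 4, 1)
    flat = x.view(x.size(0), -1)             # what DQN_hot3.forward does
    assert flat.shape == (4, 16)
    assert flat.stride() == (16, 1)          # the layout reinterpret_tensor requests
    assert flat.data_ptr() == x.data_ptr()   # zero-copy: same storage, new view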
CoAxLab/azad
DQN_hot3
false
17176
[ "MIT" ]
6
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6